summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJason Pleau <jason@jpleau.ca>2018-03-30 15:31:05 -0400
committerJason Pleau <jason@jpleau.ca>2018-03-30 15:31:05 -0400
commitc48a8f0b4fd119ed26cbcccd3044b3f12b435f70 (patch)
treeb91a6922fc1ca3e2dd12632ea4cb83b133143fee
parentf504cab6b7f2fdc0c720636f405b429027f47512 (diff)
parentfa27a6cfe201f8d4241fff59aaa0867cb238122f (diff)
Update upstream source from tag 'upstream/3.2.1'
Update to upstream version '3.2.1' with Debian dir e7bab66e04e961d2b6d081e680a64e1c672c739a
-rw-r--r--PKG-INFO4
-rw-r--r--docker.egg-info/PKG-INFO4
-rw-r--r--docker.egg-info/SOURCES.txt6
-rw-r--r--docker.egg-info/requires.txt10
-rw-r--r--docker/api/build.py117
-rw-r--r--docker/api/client.py63
-rw-r--r--docker/api/config.py91
-rw-r--r--docker/api/container.py247
-rw-r--r--docker/api/daemon.py34
-rw-r--r--docker/api/exec_api.py37
-rw-r--r--docker/api/image.py135
-rw-r--r--docker/api/network.py25
-rw-r--r--docker/api/plugin.py8
-rw-r--r--docker/api/secret.py13
-rw-r--r--docker/api/service.py209
-rw-r--r--docker/api/swarm.py81
-rw-r--r--docker/api/volume.py4
-rw-r--r--docker/auth.py127
-rw-r--r--docker/client.py17
-rw-r--r--docker/constants.py3
-rw-r--r--docker/errors.py11
-rw-r--r--docker/models/configs.py69
-rw-r--r--docker/models/containers.py168
-rw-r--r--docker/models/images.py196
-rw-r--r--docker/models/networks.py35
-rw-r--r--docker/models/services.py103
-rw-r--r--docker/models/swarm.py27
-rw-r--r--docker/tls.py34
-rw-r--r--docker/transport/unixconn.py24
-rw-r--r--docker/types/__init__.py6
-rw-r--r--docker/types/containers.py170
-rw-r--r--docker/types/daemon.py62
-rw-r--r--docker/types/healthcheck.py25
-rw-r--r--docker/types/services.py298
-rw-r--r--docker/types/swarm.py92
-rw-r--r--docker/utils/__init__.py8
-rw-r--r--docker/utils/build.py304
-rw-r--r--docker/utils/config.py66
-rw-r--r--docker/utils/decorators.py6
-rw-r--r--docker/utils/socket.py7
-rw-r--r--docker/utils/utils.py106
-rw-r--r--docker/version.py2
-rw-r--r--requirements.txt4
-rw-r--r--setup.py14
-rw-r--r--test-requirements.txt5
-rw-r--r--tests/helpers.py29
-rw-r--r--tests/integration/api_build_test.py213
-rw-r--r--tests/integration/api_client_test.py44
-rw-r--r--tests/integration/api_config_test.py72
-rw-r--r--tests/integration/api_container_test.py716
-rw-r--r--tests/integration/api_exec_test.py105
-rw-r--r--tests/integration/api_healthcheck_test.py5
-rw-r--r--tests/integration/api_image_test.py104
-rw-r--r--tests/integration/api_network_test.py135
-rw-r--r--tests/integration/api_secret_test.py15
-rw-r--r--tests/integration/api_service_test.py778
-rw-r--r--tests/integration/api_swarm_test.py67
-rw-r--r--tests/integration/api_volume_test.py15
-rw-r--r--tests/integration/base.py35
-rw-r--r--tests/integration/client_test.py20
-rw-r--r--tests/integration/errors_test.py5
-rw-r--r--tests/integration/models_containers_test.py137
-rw-r--r--tests/integration/models_images_test.py55
-rw-r--r--tests/integration/models_networks_test.py10
-rw-r--r--tests/integration/models_nodes_test.py2
-rw-r--r--tests/integration/models_services_test.py236
-rw-r--r--tests/integration/models_swarm_test.py12
-rw-r--r--tests/integration/regression_test.py30
-rw-r--r--tests/unit/api_build_test.py72
-rw-r--r--tests/unit/api_container_test.py852
-rw-r--r--tests/unit/api_exec_test.py90
-rw-r--r--tests/unit/api_image_test.py52
-rw-r--r--tests/unit/api_network_test.py108
-rw-r--r--tests/unit/api_test.py98
-rw-r--r--tests/unit/api_volume_test.py77
-rw-r--r--tests/unit/auth_test.py392
-rw-r--r--tests/unit/client_test.py23
-rw-r--r--tests/unit/dockertypes_test.py303
-rw-r--r--tests/unit/errors_test.py16
-rw-r--r--tests/unit/fake_api.py51
-rw-r--r--tests/unit/fake_api_client.py4
-rw-r--r--tests/unit/models_containers_test.py62
-rw-r--r--tests/unit/models_images_test.py22
-rw-r--r--tests/unit/models_networks_test.py4
-rw-r--r--tests/unit/models_services_test.py4
-rw-r--r--tests/unit/ssladapter_test.py11
-rw-r--r--tests/unit/swarm_test.py53
-rw-r--r--tests/unit/utils_config_test.py123
-rw-r--r--tests/unit/utils_test.py472
89 files changed, 5794 insertions, 3012 deletions
diff --git a/PKG-INFO b/PKG-INFO
index 85d5adc..a02d191 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,11 +1,12 @@
Metadata-Version: 1.1
Name: docker
-Version: 2.5.1
+Version: 3.2.1
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
Author: Joffrey F
Author-email: joffrey@docker.com
License: Apache License 2.0
+Description-Content-Type: UNKNOWN
Description: Docker SDK for Python
=====================
@@ -113,5 +114,6 @@ Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
index 85d5adc..a02d191 100644
--- a/docker.egg-info/PKG-INFO
+++ b/docker.egg-info/PKG-INFO
@@ -1,11 +1,12 @@
Metadata-Version: 1.1
Name: docker
-Version: 2.5.1
+Version: 3.2.1
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
Author: Joffrey F
Author-email: joffrey@docker.com
License: Apache License 2.0
+Description-Content-Type: UNKNOWN
Description: Docker SDK for Python
=====================
@@ -113,5 +114,6 @@ Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
index ae54eed..9a773e6 100644
--- a/docker.egg-info/SOURCES.txt
+++ b/docker.egg-info/SOURCES.txt
@@ -22,6 +22,7 @@ docker.egg-info/top_level.txt
docker/api/__init__.py
docker/api/build.py
docker/api/client.py
+docker/api/config.py
docker/api/container.py
docker/api/daemon.py
docker/api/exec_api.py
@@ -33,6 +34,7 @@ docker/api/service.py
docker/api/swarm.py
docker/api/volume.py
docker/models/__init__.py
+docker/models/configs.py
docker/models/containers.py
docker/models/images.py
docker/models/networks.py
@@ -51,12 +53,14 @@ docker/transport/unixconn.py
docker/types/__init__.py
docker/types/base.py
docker/types/containers.py
+docker/types/daemon.py
docker/types/healthcheck.py
docker/types/networks.py
docker/types/services.py
docker/types/swarm.py
docker/utils/__init__.py
docker/utils/build.py
+docker/utils/config.py
docker/utils/decorators.py
docker/utils/fnmatch.py
docker/utils/json_stream.py
@@ -68,6 +72,7 @@ tests/helpers.py
tests/integration/__init__.py
tests/integration/api_build_test.py
tests/integration/api_client_test.py
+tests/integration/api_config_test.py
tests/integration/api_container_test.py
tests/integration/api_exec_test.py
tests/integration/api_healthcheck_test.py
@@ -115,6 +120,7 @@ tests/unit/models_resources_test.py
tests/unit/models_services_test.py
tests/unit/ssladapter_test.py
tests/unit/swarm_test.py
+tests/unit/utils_config_test.py
tests/unit/utils_json_stream_test.py
tests/unit/utils_test.py
tests/unit/testdata/certs/ca.pem
diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt
index b79f011..623f4c7 100644
--- a/docker.egg-info/requires.txt
+++ b/docker.egg-info/requires.txt
@@ -1,7 +1,7 @@
-requests!=2.11.0,!=2.12.2,!=2.18.0,>=2.5.2
+requests!=2.18.0,>=2.14.2
six>=1.4.0
websocket-client>=0.32.0
-docker-pycreds>=0.2.1
+docker-pycreds>=0.2.2
[:python_version < "3.3"]
ipaddress>=1.0.16
@@ -9,6 +9,12 @@ ipaddress>=1.0.16
[:python_version < "3.5"]
backports.ssl_match_hostname>=3.5
+[:sys_platform == "win32" and python_version < "3.6"]
+pypiwin32==219
+
+[:sys_platform == "win32" and python_version >= "3.6"]
+pypiwin32==220
+
[tls]
pyOpenSSL>=0.14
cryptography>=1.3.4
diff --git a/docker/api/build.py b/docker/api/build.py
index f9678a3..d69985e 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -1,7 +1,7 @@
import json
import logging
import os
-import re
+import random
from .. import auth
from .. import constants
@@ -14,12 +14,12 @@ log = logging.getLogger(__name__)
class BuildApiMixin(object):
def build(self, path=None, tag=None, quiet=False, fileobj=None,
- nocache=False, rm=False, stream=False, timeout=None,
+ nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
- squash=None):
+ squash=None, extra_hosts=None, platform=None, isolation=None):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
@@ -67,9 +67,6 @@ class BuildApiMixin(object):
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
- stream (bool): *Deprecated for API version > 1.8 (always True)*.
- Return a blocking generator you can iterate over to retrieve
- build output as it happens
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
@@ -93,14 +90,19 @@ class BuildApiMixin(object):
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
- cache_from (list): A list of images used for build cache
- resolution
+ cache_from (:py:class:`list`): A list of images used for build
+ cache resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting images layers into a
single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
Returns:
A generator for the build output.
@@ -143,23 +145,16 @@ class BuildApiMixin(object):
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore, 'r') as f:
- exclude = list(filter(bool, f.read().splitlines()))
+ exclude = list(filter(
+ lambda x: x != '' and x[0] != '#',
+ [l.strip() for l in f.read().splitlines()]
+ ))
+ dockerfile = process_dockerfile(dockerfile, path)
context = utils.tar(
path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
)
encoding = 'gzip' if gzip else encoding
- if utils.compare_version('1.8', self._version) >= 0:
- stream = True
-
- if dockerfile and utils.compare_version('1.17', self._version) < 0:
- raise errors.InvalidVersion(
- 'dockerfile was only introduced in API version 1.17'
- )
-
- if utils.compare_version('1.19', self._version) < 0:
- pull = 1 if pull else 0
-
u = self._url('/build')
params = {
't': tag,
@@ -174,12 +169,7 @@ class BuildApiMixin(object):
params.update(container_limits)
if buildargs:
- if utils.version_gte(self._version, '1.21'):
- params.update({'buildargs': json.dumps(buildargs)})
- else:
- raise errors.InvalidVersion(
- 'buildargs was only introduced in API version 1.21'
- )
+ params.update({'buildargs': json.dumps(buildargs)})
if shmsize:
if utils.version_gte(self._version, '1.22'):
@@ -229,35 +219,50 @@ class BuildApiMixin(object):
'squash was only introduced in API version 1.25'
)
+ if extra_hosts is not None:
+ if utils.version_lt(self._version, '1.27'):
+ raise errors.InvalidVersion(
+ 'extra_hosts was only introduced in API version 1.27'
+ )
+
+ if isinstance(extra_hosts, dict):
+ extra_hosts = utils.format_extra_hosts(extra_hosts)
+ params.update({'extrahosts': extra_hosts})
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
+
+ if isolation is not None:
+ if utils.version_lt(self._version, '1.24'):
+ raise errors.InvalidVersion(
+ 'isolation was only introduced in API version 1.24'
+ )
+ params['isolation'] = isolation
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
headers['Content-Encoding'] = encoding
- if utils.compare_version('1.9', self._version) >= 0:
- self._set_auth_headers(headers)
+ self._set_auth_headers(headers)
response = self._post(
u,
data=context,
params=params,
headers=headers,
- stream=stream,
+ stream=True,
timeout=timeout,
)
if context is not None and not custom_context:
context.close()
- if stream:
- return self._stream_helper(response, decode=decode)
- else:
- output = self._result(response)
- srch = r'Successfully built ([0-9a-f]+)'
- match = re.search(srch, output)
- if not match:
- return None, output
- return match.group(1), output
+ return self._stream_helper(response, decode=decode)
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
@@ -278,14 +283,12 @@ class BuildApiMixin(object):
# Matches CLI behavior: https://github.com/docker/docker/blob/
# 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
# credentials/native_store.go#L68-L83
- for registry in self._auth_configs.keys():
- if registry == 'credsStore' or registry == 'HttpHeaders':
- continue
+ for registry in self._auth_configs.get('auths', {}).keys():
auth_data[registry] = auth.resolve_authconfig(
self._auth_configs, registry
)
else:
- auth_data = self._auth_configs.copy()
+ auth_data = self._auth_configs.get('auths', {}).copy()
# See https://github.com/docker/docker-py/issues/1683
if auth.INDEX_NAME in auth_data:
auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
@@ -296,13 +299,27 @@ class BuildApiMixin(object):
)
)
- if utils.compare_version('1.19', self._version) >= 0:
- headers['X-Registry-Config'] = auth.encode_header(
- auth_data
- )
- else:
- headers['X-Registry-Config'] = auth.encode_header({
- 'configs': auth_data
- })
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
else:
log.debug('No auth config found')
+
+
+def process_dockerfile(dockerfile, path):
+ if not dockerfile:
+ return (None, None)
+
+ abs_dockerfile = dockerfile
+ if not os.path.isabs(dockerfile):
+ abs_dockerfile = os.path.join(path, dockerfile)
+
+ if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+ os.path.relpath(abs_dockerfile, path).startswith('..')):
+ with open(abs_dockerfile, 'r') as df:
+ return (
+ '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ df.read()
+ )
+ else:
+ return (dockerfile, None)
diff --git a/docker/api/client.py b/docker/api/client.py
index 1de10c7..13c292a 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -1,6 +1,5 @@
import json
import struct
-import warnings
from functools import partial
import requests
@@ -9,6 +8,7 @@ import six
import websocket
from .build import BuildApiMixin
+from .config import ConfigApiMixin
from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
@@ -26,12 +26,12 @@ from ..constants import (
MINIMUM_DOCKER_API_VERSION
)
from ..errors import (
- DockerException, TLSParameterError,
+ DockerException, InvalidVersion, TLSParameterError,
create_api_error_from_http_exception
)
from ..tls import TLSConfig
from ..transport import SSLAdapter, UnixAdapter
-from ..utils import utils, check_resource, update_headers
+from ..utils import utils, check_resource, update_headers, config
from ..utils.socket import frames_iter, socket_raw_iter
from ..utils.json_stream import json_stream
try:
@@ -43,6 +43,7 @@ except ImportError:
class APIClient(
requests.Session,
BuildApiMixin,
+ ConfigApiMixin,
ContainerApiMixin,
DaemonApiMixin,
ExecApiMixin,
@@ -61,21 +62,21 @@ class APIClient(
>>> import docker
>>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
>>> client.version()
- {u'ApiVersion': u'1.24',
+ {u'ApiVersion': u'1.33',
u'Arch': u'amd64',
- u'BuildTime': u'2016-09-27T23:38:15.810178467+00:00',
- u'Experimental': True,
- u'GitCommit': u'45bed2c',
- u'GoVersion': u'go1.6.3',
- u'KernelVersion': u'4.4.22-moby',
+ u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
+ u'GitCommit': u'f4ffd2511c',
+ u'GoVersion': u'go1.9.2',
+ u'KernelVersion': u'4.14.3-1-ARCH',
+ u'MinAPIVersion': u'1.12',
u'Os': u'linux',
- u'Version': u'1.12.2-rc1'}
+ u'Version': u'17.10.0-ce'}
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.26``
+ automatically detect the server's version. Default: ``1.30``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -85,6 +86,7 @@ class APIClient(
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
+ '_general_configs',
'_version',
'base_url',
'timeout']
@@ -103,7 +105,10 @@ class APIClient(
self.timeout = timeout
self.headers['User-Agent'] = user_agent
- self._auth_configs = auth.load_config()
+ self._general_configs = config.load_general_config()
+ self._auth_configs = auth.load_config(
+ config_dict=self._general_configs
+ )
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
@@ -114,7 +119,9 @@ class APIClient(
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
- self.base_url = 'http+docker://localunixsocket'
+ # host part of URL should be unused, but is resolved by requests
+ # module in proxy_bypass_macosx_sysconf()
+ self.base_url = 'http+docker://localhost'
elif base_url.startswith('npipe://'):
if not IS_WINDOWS_PLATFORM:
raise DockerException(
@@ -154,11 +161,9 @@ class APIClient(
)
)
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
- warnings.warn(
- 'The minimum API version supported is {}, but you are using '
- 'version {}. It is recommended you either upgrade Docker '
- 'Engine or use an older version of Docker SDK for '
- 'Python.'.format(MINIMUM_DOCKER_API_VERSION, self._version)
+ raise InvalidVersion(
+ 'API versions below {} are no longer supported by this '
+ 'library.'.format(MINIMUM_DOCKER_API_VERSION)
)
def _retrieve_server_version(self):
@@ -204,7 +209,7 @@ class APIClient(
'instead'.format(arg, type(arg))
)
- quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
+ quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
@@ -347,19 +352,10 @@ class APIClient(
break
yield data
- def _stream_raw_result_old(self, response):
- ''' Stream raw output for API versions below 1.6 '''
- self._raise_for_status(response)
- for line in response.iter_lines(chunk_size=1,
- decode_unicode=True):
- # filter out keep-alive new lines
- if line:
- yield line
-
- def _stream_raw_result(self, response):
- ''' Stream result for TTY-enabled container above API 1.6 '''
+ def _stream_raw_result(self, response, chunk_size=1, decode=True):
+ ''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
- for out in response.iter_content(chunk_size=1, decode_unicode=True):
+ for out in response.iter_content(chunk_size, decode):
yield out
def _read_from_socket(self, response, stream, tty=False):
@@ -413,11 +409,6 @@ class APIClient(
return self._get_result_tty(stream, res, self._check_is_tty(container))
def _get_result_tty(self, stream, res, is_tty):
- # Stream multi-plexing was only introduced in API v1.6. Anything
- # before that needs old-style streaming.
- if utils.compare_version('1.6', self._version) < 0:
- return self._stream_raw_result_old(res)
-
# We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
diff --git a/docker/api/config.py b/docker/api/config.py
new file mode 100644
index 0000000..b46b09c
--- /dev/null
+++ b/docker/api/config.py
@@ -0,0 +1,91 @@
+import base64
+
+import six
+
+from .. import utils
+
+
+class ConfigApiMixin(object):
+ @utils.minimum_version('1.25')
+ def create_config(self, name, data, labels=None):
+ """
+ Create a config
+
+ Args:
+ name (string): Name of the config
+ data (bytes): Config data to be stored
+ labels (dict): A mapping of labels to assign to the config
+
+ Returns (dict): ID of the newly created config
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ url = self._url('/configs/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def inspect_config(self, id):
+ """
+ Retrieve config metadata
+
+ Args:
+ id (string): Full ID of the config to remove
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def remove_config(self, id):
+ """
+ Remove a config
+
+ Args:
+ id (string): Full ID of the config to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def configs(self, filters=None):
+ """
+ List configs
+
+ Args:
+ filters (dict): A map of filters to process on the configs
+ list. Available filters: ``names``
+
+ Returns (list): A list of configs
+ """
+ url = self._url('/configs')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/docker/api/container.py b/docker/api/container.py
index 918f8a3..cb97b79 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,11 +1,12 @@
import six
-import warnings
from datetime import datetime
from .. import errors
from .. import utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..types import (
- ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig
+ CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
+ NetworkingConfig
)
@@ -52,10 +53,15 @@ class ContainerApiMixin(object):
u = self._url("/containers/{0}/attach", container)
response = self._post(u, headers=headers, params=params, stream=True)
- return self._read_from_socket(
+ output = self._read_from_socket(
response, stream, self._check_is_tty(container)
)
+ if stream:
+ return CancellableStream(output, response)
+ else:
+ return output
+
@utils.check_resource('container')
def attach_socket(self, container, params=None, ws=False):
"""
@@ -66,6 +72,7 @@ class ContainerApiMixin(object):
container (str): The container to attach to.
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
+ For ``detachKeys``, ~/.docker/config.json is used by default.
ws (bool): Use websockets instead of raw HTTP.
Raises:
@@ -79,6 +86,11 @@ class ContainerApiMixin(object):
'stream': 1
}
+ if 'detachKeys' not in params \
+ and 'detachKeys' in self._general_configs:
+
+ params['detachKeys'] = self._general_configs['detachKeys']
+
if ws:
return self._attach_websocket(container, params)
@@ -139,7 +151,8 @@ class ContainerApiMixin(object):
Args:
quiet (bool): Only display numeric Ids
all (bool): Show all containers. Only running containers are shown
- by default trunc (bool): Truncate output
+ by default
+ trunc (bool): Truncate output
latest (bool): Show only the latest created container, include
non-running ones.
since (str): Show only containers created since Id or Name, include
@@ -197,50 +210,14 @@ class ContainerApiMixin(object):
x['Id'] = x['Id'][:12]
return res
- @utils.check_resource('container')
- def copy(self, container, resource):
- """
- Identical to the ``docker cp`` command. Get files/folders from the
- container.
-
- **Deprecated for API version >= 1.20.** Use
- :py:meth:`~ContainerApiMixin.get_archive` instead.
-
- Args:
- container (str): The container to copy from
- resource (str): The path within the container
-
- Returns:
- The contents of the file as a string
-
- Raises:
- :py:class:`docker.errors.APIError`
- If the server returns an error.
- """
- if utils.version_gte(self._version, '1.20'):
- warnings.warn(
- 'APIClient.copy() is deprecated for API version >= 1.20, '
- 'please use get_archive() instead',
- DeprecationWarning
- )
- res = self._post_json(
- self._url("/containers/{0}/copy", container),
- data={"Resource": resource},
- stream=True
- )
- self._raise_for_status(res)
- return res.raw
-
def create_container(self, image, command=None, hostname=None, user=None,
- detach=False, stdin_open=False, tty=False,
- mem_limit=None, ports=None, environment=None,
- dns=None, volumes=None, volumes_from=None,
+ detach=False, stdin_open=False, tty=False, ports=None,
+ environment=None, volumes=None,
network_disabled=False, name=None, entrypoint=None,
- cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=None, cpuset=None, host_config=None,
- mac_address=None, labels=None, volume_driver=None,
- stop_signal=None, networking_config=None,
- healthcheck=None, stop_timeout=None, runtime=None):
+ working_dir=None, domainname=None, host_config=None,
+ mac_address=None, labels=None, stop_signal=None,
+ networking_config=None, healthcheck=None,
+ stop_timeout=None, runtime=None):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
@@ -382,27 +359,17 @@ class ContainerApiMixin(object):
return container ID
stdin_open (bool): Keep STDIN open even if not attached
tty (bool): Allocate a pseudo-TTY
- mem_limit (float or str): Memory limit. Accepts float values (which
- represent the memory limit of the created container in bytes)
- or a string with a units identification char (``100000b``,
- ``1000k``, ``128m``, ``1g``). If a string is specified without
- a units character, bytes are assumed as an intended unit.
ports (list of ints): A list of port numbers
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
- dns (:py:class:`list`): DNS name servers. Deprecated since API
- version 1.10. Use ``host_config`` instead.
volumes (str or list): List of paths inside the container to use
as volumes.
- volumes_from (:py:class:`list`): List of container names or Ids to
- get volumes from.
network_disabled (bool): Disable networking
name (str): A name for the container
entrypoint (str or list): An entrypoint
working_dir (str): Path to the working directory
domainname (str): The domain name to use for the container
- memswap_limit (int):
host_config (dict): A dictionary created with
:py:meth:`create_host_config`.
mac_address (str): The Mac Address to assign the container
@@ -410,7 +377,6 @@ class ContainerApiMixin(object):
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
- volume_driver (str): The name of a volume driver/plugin.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
stop_timeout (int): Timeout to stop the container, in seconds.
@@ -433,17 +399,12 @@ class ContainerApiMixin(object):
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
- if host_config and utils.compare_version('1.15', self._version) < 0:
- raise errors.InvalidVersion(
- 'host_config is not supported in API < 1.15'
- )
-
config = self.create_container_config(
- image, command, hostname, user, detach, stdin_open, tty, mem_limit,
- ports, dns, environment, volumes, volumes_from,
- network_disabled, entrypoint, cpu_shares, working_dir, domainname,
- memswap_limit, cpuset, host_config, mac_address, labels,
- volume_driver, stop_signal, networking_config, healthcheck,
+ image, command, hostname, user, detach, stdin_open, tty,
+ ports, environment, volumes,
+ network_disabled, entrypoint, working_dir, domainname,
+ host_config, mac_address, labels,
+ stop_signal, networking_config, healthcheck,
stop_timeout, runtime
)
return self.create_container_from_config(config, name)
@@ -484,6 +445,8 @@ class ContainerApiMixin(object):
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
@@ -529,6 +492,10 @@ class ContainerApiMixin(object):
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``binds``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
@@ -685,15 +652,18 @@ class ContainerApiMixin(object):
)
@utils.check_resource('container')
- def export(self, container):
+ def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Export the contents of a filesystem as a tar archive.
Args:
container (str): The container to export
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
Returns:
- (str): The filesystem tar archive
+ (generator): The archived filesystem data stream
Raises:
:py:class:`docker.errors.APIError`
@@ -702,12 +672,10 @@ class ContainerApiMixin(object):
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
- self._raise_for_status(res)
- return res.raw
+ return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container')
- @utils.minimum_version('1.20')
- def get_archive(self, container, path):
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
@@ -715,6 +683,9 @@ class ContainerApiMixin(object):
Args:
container (str): The container where the file is located
path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -732,7 +703,7 @@ class ContainerApiMixin(object):
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
- res.raw,
+ self._stream_raw_result(res, chunk_size, False),
utils.decode_json_header(encoded_stat) if encoded_stat else None
)
@@ -781,7 +752,8 @@ class ContainerApiMixin(object):
@utils.check_resource('container')
def logs(self, container, stdout=True, stderr=True, stream=False,
- timestamps=False, tail='all', since=None, follow=None):
+ timestamps=False, tail='all', since=None, follow=None,
+ until=None):
"""
Get logs from a container. Similar to the ``docker logs`` command.
@@ -800,6 +772,8 @@ class ContainerApiMixin(object):
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output
+ until (datetime or int): Show logs that occurred before the given
+ datetime or integer epoch (in seconds)
Returns:
(generator or str)
@@ -808,44 +782,51 @@ class ContainerApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if utils.compare_version('1.11', self._version) >= 0:
- if follow is None:
- follow = stream
- params = {'stderr': stderr and 1 or 0,
- 'stdout': stdout and 1 or 0,
- 'timestamps': timestamps and 1 or 0,
- 'follow': follow and 1 or 0,
- }
- if utils.compare_version('1.13', self._version) >= 0:
- if tail != 'all' and (not isinstance(tail, int) or tail < 0):
- tail = 'all'
- params['tail'] = tail
-
- if since is not None:
- if utils.compare_version('1.19', self._version) < 0:
- raise errors.InvalidVersion(
- 'since is not supported in API < 1.19'
- )
- else:
- if isinstance(since, datetime):
- params['since'] = utils.datetime_to_timestamp(since)
- elif (isinstance(since, int) and since > 0):
- params['since'] = since
- else:
- raise errors.InvalidArgument(
- 'since value should be datetime or int, not {}'.
- format(type(since))
- )
- url = self._url("/containers/{0}/logs", container)
- res = self._get(url, params=params, stream=stream)
- return self._get_result(container, stream, res)
- return self.attach(
- container,
- stdout=stdout,
- stderr=stderr,
- stream=stream,
- logs=True
- )
+ if follow is None:
+ follow = stream
+ params = {'stderr': stderr and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'timestamps': timestamps and 1 or 0,
+ 'follow': follow and 1 or 0,
+ }
+ if tail != 'all' and (not isinstance(tail, int) or tail < 0):
+ tail = 'all'
+ params['tail'] = tail
+
+ if since is not None:
+ if isinstance(since, datetime):
+ params['since'] = utils.datetime_to_timestamp(since)
+ elif (isinstance(since, int) and since > 0):
+ params['since'] = since
+ else:
+ raise errors.InvalidArgument(
+ 'since value should be datetime or positive int, '
+ 'not {}'.format(type(since))
+ )
+
+ if until is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'until is not supported for API version < 1.35'
+ )
+ if isinstance(until, datetime):
+ params['until'] = utils.datetime_to_timestamp(until)
+ elif (isinstance(until, int) and until > 0):
+ params['until'] = until
+ else:
+ raise errors.InvalidArgument(
+ 'until value should be datetime or positive int, '
+ 'not {}'.format(type(until))
+ )
+
+ url = self._url("/containers/{0}/logs", container)
+ res = self._get(url, params=params, stream=stream)
+ output = self._get_result(container, stream, res)
+
+ if stream:
+ return CancellableStream(output, res)
+ else:
+ return output
@utils.check_resource('container')
def pause(self, container):
@@ -913,7 +894,6 @@ class ContainerApiMixin(object):
return h_ports
@utils.check_resource('container')
- @utils.minimum_version('1.20')
def put_archive(self, container, path, data):
"""
Insert a file or folder in an existing container using a tar archive as
@@ -983,7 +963,6 @@ class ContainerApiMixin(object):
)
self._raise_for_status(res)
- @utils.minimum_version('1.17')
@utils.check_resource('container')
def rename(self, container, name):
"""
@@ -1080,7 +1059,6 @@ class ContainerApiMixin(object):
res = self._post(url)
self._raise_for_status(res)
- @utils.minimum_version('1.17')
@utils.check_resource('container')
def stats(self, container, decode=None, stream=True):
"""
@@ -1108,20 +1086,26 @@ class ContainerApiMixin(object):
json=True)
@utils.check_resource('container')
- def stop(self, container, timeout=10):
+ def stop(self, container, timeout=None):
"""
Stops a container. Similar to the ``docker stop`` command.
Args:
container (str): The container to stop
timeout (int): Timeout in seconds to wait for the container to
- stop before sending a ``SIGKILL``. Default: 10
+ stop before sending a ``SIGKILL``. If None, then the
+ StopTimeout value of the container will be used.
+ Default: None
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- params = {'t': timeout}
+ if timeout is None:
+ params = {}
+ timeout = 10
+ else:
+ params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
res = self._post(url, params=params,
@@ -1229,7 +1213,7 @@ class ContainerApiMixin(object):
return self._result(res, True)
@utils.check_resource('container')
- def wait(self, container, timeout=None):
+ def wait(self, container, timeout=None, condition=None):
"""
Block until a container stops, then return its exit code. Similar to
the ``docker wait`` command.
@@ -1238,10 +1222,13 @@ class ContainerApiMixin(object):
container (str or dict): The container to wait on. If a dict, the
``Id`` key is used.
timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
Returns:
- (int): The exit code of the container. Returns ``-1`` if the API
- responds without a ``StatusCode`` attribute.
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
@@ -1250,9 +1237,13 @@ class ContainerApiMixin(object):
If the server returns an error.
"""
url = self._url("/containers/{0}/wait", container)
- res = self._post(url, timeout=timeout)
- self._raise_for_status(res)
- json_ = res.json()
- if 'StatusCode' in json_:
- return json_['StatusCode']
- return -1
+ params = {}
+ if condition is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'wait condition is not supported for API version < 1.30'
+ )
+ params['condition'] = condition
+
+ res = self._post(url, timeout=timeout, params=params)
+ return self._result(res, True)
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 285b742..fc3692c 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -1,9 +1,7 @@
import os
-import warnings
from datetime import datetime
-from .. import auth, utils
-from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
+from .. import auth, types, utils
class DaemonApiMixin(object):
@@ -36,8 +34,7 @@ class DaemonApiMixin(object):
the fly. False by default.
Returns:
- (generator): A blocking generator you can iterate over to retrieve
- events as they happen.
+ A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
@@ -52,6 +49,14 @@ class DaemonApiMixin(object):
u'status': u'start',
u'time': 1423339459}
...
+
+ or
+
+ >>> events = client.events()
+ >>> for event in events:
+ ... print event
+ >>> # and cancel from another thread
+ >>> events.close()
"""
if isinstance(since, datetime):
@@ -70,10 +75,10 @@ class DaemonApiMixin(object):
}
url = self._url('/events')
- return self._stream_helper(
- self._get(url, params=params, stream=True, timeout=None),
- decode=decode
- )
+ response = self._get(url, params=params, stream=True, timeout=None)
+ stream = self._stream_helper(response, decode=decode)
+
+ return types.CancellableStream(stream, response)
def info(self):
"""
@@ -90,7 +95,7 @@ class DaemonApiMixin(object):
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
- reauth=False, insecure_registry=False, dockercfg_path=None):
+ reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
@@ -113,11 +118,6 @@ class DaemonApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if insecure_registry:
- warnings.warn(
- INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
- DeprecationWarning
- )
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
@@ -144,7 +144,9 @@ class DaemonApiMixin(object):
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- self._auth_configs[registry or auth.INDEX_NAME] = req_data
+ if 'auths' not in self._auth_configs:
+ self._auth_configs['auths'] = {}
+ self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
return self._result(response, json=True)
def ping(self):
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 6f42524..986d87f 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -5,11 +5,10 @@ from .. import utils
class ExecApiMixin(object):
- @utils.minimum_version('1.15')
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
- environment=None):
+ environment=None, workdir=None, detach_keys=None):
"""
Sets up an exec instance in a running container.
@@ -26,6 +25,12 @@ class ExecApiMixin(object):
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+ detach_keys (str): Override the key sequence for detaching
+ a container. Format is a single character `[a-Z]`
+ or `ctrl-<value>` where `<value>` is one of:
+ `a-z`, `@`, `^`, `[`, `,` or `_`.
+ ~/.docker/config.json is used by default.
Returns:
(dict): A dictionary with an exec ``Id`` key.
@@ -35,14 +40,6 @@ class ExecApiMixin(object):
If the server returns an error.
"""
- if privileged and utils.version_lt(self._version, '1.19'):
- raise errors.InvalidVersion(
- 'Privileged exec is not supported in API < 1.19'
- )
- if user and utils.version_lt(self._version, '1.19'):
- raise errors.InvalidVersion(
- 'User-specific exec is not supported in API < 1.19'
- )
if environment is not None and utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Setting environment for exec is not supported in API < 1.25'
@@ -66,11 +63,22 @@ class ExecApiMixin(object):
'Env': environment,
}
+ if workdir is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'workdir is not supported for API version < 1.35'
+ )
+ data['WorkingDir'] = workdir
+
+ if detach_keys:
+ data['detachKeys'] = detach_keys
+ elif 'detachKeys' in self._general_configs:
+ data['detachKeys'] = self._general_configs['detachKeys']
+
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
- @utils.minimum_version('1.16')
def exec_inspect(self, exec_id):
"""
Return low-level information about an exec command.
@@ -90,7 +98,6 @@ class ExecApiMixin(object):
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
- @utils.minimum_version('1.15')
def exec_resize(self, exec_id, height=None, width=None):
"""
Resize the tty session used by the specified exec command.
@@ -109,7 +116,6 @@ class ExecApiMixin(object):
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.minimum_version('1.15')
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
@@ -122,10 +128,13 @@ class ExecApiMixin(object):
Default: False
tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Stream response data. Default: False
+ socket (bool): Return the connection socket to allow custom
+ read/write operations.
Returns:
(generator or str): If ``stream=True``, a generator yielding
- response chunks. A string containing response data otherwise.
+ response chunks. If ``socket=True``, a socket object for the
+ connection. A string containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
diff --git a/docker/api/image.py b/docker/api/image.py
index 41cc267..5f05d88 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,11 +1,10 @@
import logging
import os
-import warnings
import six
from .. import auth, errors, utils
-from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
@@ -13,16 +12,18 @@ log = logging.getLogger(__name__)
class ImageApiMixin(object):
@utils.check_resource('image')
- def get_image(self, image):
+ def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
Returns:
- (urllib3.response.HTTPResponse object): The response from the
- daemon.
+ (generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
@@ -30,14 +31,14 @@ class ImageApiMixin(object):
Example:
- >>> image = cli.get_image("fedora:latest")
- >>> f = open('/tmp/fedora-latest.tar', 'w')
- >>> f.write(image.data)
+ >>> image = cli.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'w')
+ >>> for chunk in image:
+ >>> f.write(chunk)
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
- self._raise_for_status(res)
- return res.raw
+ return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('image')
def history(self, image):
@@ -57,8 +58,7 @@ class ImageApiMixin(object):
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
- def images(self, name=None, quiet=False, all=False, viz=False,
- filters=None):
+ def images(self, name=None, quiet=False, all=False, filters=None):
"""
List images. Similar to the ``docker images`` command.
@@ -79,10 +79,6 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if viz:
- if utils.compare_version('1.7', self._version) >= 0:
- raise Exception('Viz output is not supported in API >= 1.7!')
- return self._result(self._get(self._url("images/viz")))
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
@@ -229,26 +225,13 @@ class ImageApiMixin(object):
)
@utils.check_resource('image')
- def insert(self, image, url, path):
- if utils.compare_version('1.12', self._version) >= 0:
- raise errors.DeprecatedMethod(
- 'insert is not available for API version >=1.12'
- )
- api_url = self._url("/images/{0}/insert", image)
- params = {
- 'url': url,
- 'path': path
- }
- return self._result(self._post(api_url, params=params))
-
- @utils.check_resource('image')
def inspect_image(self, image):
"""
Get detailed information about an image. Similar to the ``docker
- inspect`` command, but only for containers.
+ inspect`` command, but only for images.
Args:
- container (str): The container to inspect
+ image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
@@ -262,6 +245,27 @@ class ImageApiMixin(object):
self._get(self._url("/images/{0}/json", image)), True
)
+ @utils.minimum_version('1.30')
+ @utils.check_resource('image')
+ def inspect_distribution(self, image):
+ """
+ Get image digest and platform information by contacting the registry.
+
+ Args:
+ image (str): The image name to inspect
+
+ Returns:
+ (dict): A dict containing distribution data
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self._result(
+ self._get(self._url("/distribution/{0}/json", image)), True
+ )
+
def load_image(self, data, quiet=None):
"""
Load an image that was previously saved using
@@ -305,8 +309,8 @@ class ImageApiMixin(object):
Args:
filters (dict): Filters to process on the prune list.
Available filters:
- - dangling (bool): When set to true (or 1), prune only
- unused and untagged images.
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
Returns:
(dict): A dict containing a list of deleted image IDs and
@@ -322,8 +326,8 @@ class ImageApiMixin(object):
params['filters'] = utils.convert_filters(filters)
return self._result(self._post(url, params=params), True)
- def pull(self, repository, tag=None, stream=False,
- insecure_registry=False, auth_config=None, decode=False):
+ def pull(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False, platform=None):
"""
Pulls an image. Similar to the ``docker pull`` command.
@@ -331,11 +335,13 @@ class ImageApiMixin(object):
repository (str): The repository to pull
tag (str): The tag to pull
stream (bool): Stream the output as a generator
- insecure_registry (bool): Use an insecure registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+ platform (str): Platform in the format ``os[/arch[/variant]]``
Returns:
(generator or str): The output
@@ -360,12 +366,6 @@ class ImageApiMixin(object):
}
"""
- if insecure_registry:
- warnings.warn(
- INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
- DeprecationWarning
- )
-
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
@@ -376,14 +376,20 @@ class ImageApiMixin(object):
}
headers = {}
- if utils.compare_version('1.5', self._version) >= 0:
- if auth_config is None:
- header = auth.get_config_header(self, registry)
- if header:
- headers['X-Registry-Auth'] = header
- else:
- log.debug('Sending supplied auth config')
- headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
response = self._post(
self._url('/images/create'), params=params, headers=headers,
@@ -397,8 +403,8 @@ class ImageApiMixin(object):
return self._result(response)
- def push(self, repository, tag=None, stream=False,
- insecure_registry=False, auth_config=None, decode=False):
+ def push(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
@@ -407,12 +413,12 @@ class ImageApiMixin(object):
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
- insecure_registry (bool): Use ``http://`` to connect to the
- registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
@@ -431,12 +437,6 @@ class ImageApiMixin(object):
...
"""
- if insecure_registry:
- warnings.warn(
- INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
- DeprecationWarning
- )
-
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
@@ -446,14 +446,13 @@ class ImageApiMixin(object):
}
headers = {}
- if utils.compare_version('1.5', self._version) >= 0:
- if auth_config is None:
- header = auth.get_config_header(self, registry)
- if header:
- headers['X-Registry-Auth'] = header
- else:
- log.debug('Sending supplied auth config')
- headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
diff --git a/docker/api/network.py b/docker/api/network.py
index befbb58..57ed8d3 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -5,7 +5,6 @@ from .. import utils
class NetworkApiMixin(object):
- @minimum_version('1.21')
def networks(self, names=None, ids=None, filters=None):
"""
List networks. Similar to the ``docker networks ls`` command.
@@ -38,7 +37,6 @@ class NetworkApiMixin(object):
res = self._get(url, params=params)
return self._result(res, json=True)
- @minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None,
@@ -61,6 +59,8 @@ class NetworkApiMixin(object):
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
@@ -140,6 +140,13 @@ class NetworkApiMixin(object):
data['Ingress'] = ingress
+ if scope is not None:
+ if version_lt(self._version, '1.30'):
+ raise InvalidVersion(
+ 'scope is not supported in API version < 1.30'
+ )
+ data['Scope'] = scope
+
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@@ -166,7 +173,6 @@ class NetworkApiMixin(object):
url = self._url('/networks/prune')
return self._result(self._post(url, params=params), True)
- @minimum_version('1.21')
@check_resource('net_id')
def remove_network(self, net_id):
"""
@@ -179,9 +185,8 @@ class NetworkApiMixin(object):
res = self._delete(url)
self._raise_for_status(res)
- @minimum_version('1.21')
@check_resource('net_id')
- def inspect_network(self, net_id, verbose=None):
+ def inspect_network(self, net_id, verbose=None, scope=None):
"""
Get detailed information about a network.
@@ -189,19 +194,24 @@ class NetworkApiMixin(object):
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
"""
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
+ if scope is not None:
+ if version_lt(self._version, '1.31'):
+ raise InvalidVersion('scope was introduced in API 1.31')
+ params['scope'] = scope
url = self._url("/networks/{0}", net_id)
res = self._get(url, params=params)
return self._result(res, json=True)
@check_resource('container')
- @minimum_version('1.21')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
@@ -223,7 +233,7 @@ class NetworkApiMixin(object):
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local
- (IPv4/IPv6) addresses.
+ (IPv4/IPv6) addresses.
"""
data = {
"Container": container,
@@ -238,7 +248,6 @@ class NetworkApiMixin(object):
self._raise_for_status(res)
@check_resource('container')
- @minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id,
force=False):
"""
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
index 87520cc..73f1852 100644
--- a/docker/api/plugin.py
+++ b/docker/api/plugin.py
@@ -110,8 +110,8 @@ class PluginApiMixin(object):
remote (string): Remote reference for the plugin to install.
The ``:latest`` tag is optional, and is the default if
omitted.
- privileges (list): A list of privileges the user consents to
- grant to the plugin. Can be retrieved using
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
name (string): Local name for the pulled plugin. The
``:latest`` tag is optional, and is the default if omitted.
@@ -225,8 +225,8 @@ class PluginApiMixin(object):
tag is optional and is the default if omitted.
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
- privileges (list): A list of privileges the user consents to
- grant to the plugin. Can be retrieved using
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
Returns:
diff --git a/docker/api/secret.py b/docker/api/secret.py
index 1760a39..fa4c2ab 100644
--- a/docker/api/secret.py
+++ b/docker/api/secret.py
@@ -2,12 +2,13 @@ import base64
import six
+from .. import errors
from .. import utils
class SecretApiMixin(object):
@utils.minimum_version('1.25')
- def create_secret(self, name, data, labels=None):
+ def create_secret(self, name, data, labels=None, driver=None):
"""
Create a secret
@@ -15,6 +16,8 @@ class SecretApiMixin(object):
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
+ driver (DriverConfig): A custom driver configuration. If
+ unspecified, the default ``internal`` driver will be used
Returns (dict): ID of the newly created secret
"""
@@ -30,6 +33,14 @@ class SecretApiMixin(object):
'Labels': labels
}
+ if driver is not None:
+ if utils.version_lt(self._version, '1.31'):
+ raise errors.InvalidVersion(
+ 'Secret driver is only available for API version > 1.31'
+ )
+
+ body['Driver'] = driver
+
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
diff --git a/docker/api/service.py b/docker/api/service.py
index 4b555a5..03b0ca6 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -1,48 +1,97 @@
-import warnings
from .. import auth, errors, utils
from ..types import ServiceMode
-def _check_api_features(version, task_template, update_config):
+def _check_api_features(version, task_template, update_config, endpoint_spec):
+
+ def raise_version_error(param, min_version):
+ raise errors.InvalidVersion(
+ '{} is not supported in API version < {}'.format(
+ param, min_version
+ )
+ )
+
if update_config is not None:
if utils.version_lt(version, '1.25'):
if 'MaxFailureRatio' in update_config:
- raise errors.InvalidVersion(
- 'UpdateConfig.max_failure_ratio is not supported in'
- ' API version < 1.25'
- )
+ raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
if 'Monitor' in update_config:
- raise errors.InvalidVersion(
- 'UpdateConfig.monitor is not supported in'
- ' API version < 1.25'
- )
+ raise_version_error('UpdateConfig.monitor', '1.25')
+
+ if utils.version_lt(version, '1.29'):
+ if 'Order' in update_config:
+ raise_version_error('UpdateConfig.order', '1.29')
+
+ if endpoint_spec is not None:
+ if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
+ if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
+ raise_version_error('EndpointSpec.Ports[].mode', '1.32')
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
- raise errors.InvalidVersion(
- 'force_update is not supported in API version < 1.25'
- )
+ raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
if task_template['Placement'].get('Platforms'):
- raise errors.InvalidVersion(
- 'Placement.platforms is not supported in'
- ' API version < 1.30'
- )
-
+ raise_version_error('Placement.platforms', '1.30')
if utils.version_lt(version, '1.27'):
if task_template['Placement'].get('Preferences'):
- raise errors.InvalidVersion(
- 'Placement.preferences is not supported in'
- ' API version < 1.27'
- )
- if task_template.get('ContainerSpec', {}).get('TTY'):
+ raise_version_error('Placement.preferences', '1.27')
+
+ if task_template.get('ContainerSpec'):
+ container_spec = task_template.get('ContainerSpec')
+
if utils.version_lt(version, '1.25'):
- raise errors.InvalidVersion(
- 'ContainerSpec.TTY is not supported in API version < 1.25'
- )
+ if container_spec.get('TTY'):
+ raise_version_error('ContainerSpec.tty', '1.25')
+ if container_spec.get('Hostname') is not None:
+ raise_version_error('ContainerSpec.hostname', '1.25')
+ if container_spec.get('Hosts') is not None:
+ raise_version_error('ContainerSpec.hosts', '1.25')
+ if container_spec.get('Groups') is not None:
+ raise_version_error('ContainerSpec.groups', '1.25')
+ if container_spec.get('DNSConfig') is not None:
+ raise_version_error('ContainerSpec.dns_config', '1.25')
+ if container_spec.get('Healthcheck') is not None:
+ raise_version_error('ContainerSpec.healthcheck', '1.25')
+
+ if utils.version_lt(version, '1.28'):
+ if container_spec.get('ReadOnly') is not None:
+ raise_version_error('ContainerSpec.dns_config', '1.28')
+ if container_spec.get('StopSignal') is not None:
+ raise_version_error('ContainerSpec.stop_signal', '1.28')
+
+ if utils.version_lt(version, '1.30'):
+ if container_spec.get('Configs') is not None:
+ raise_version_error('ContainerSpec.configs', '1.30')
+ if container_spec.get('Privileges') is not None:
+ raise_version_error('ContainerSpec.privileges', '1.30')
+
+ if utils.version_lt(version, '1.35'):
+ if container_spec.get('Isolation') is not None:
+ raise_version_error('ContainerSpec.isolation', '1.35')
+
+ if task_template.get('Resources'):
+ if utils.version_lt(version, '1.32'):
+ if task_template['Resources'].get('GenericResources'):
+ raise_version_error('Resources.generic_resources', '1.32')
+
+
+def _merge_task_template(current, override):
+ merged = current.copy()
+ if override is not None:
+ for ts_key, ts_value in override.items():
+ if ts_key == 'ContainerSpec':
+ if 'ContainerSpec' not in merged:
+ merged['ContainerSpec'] = {}
+ for cs_key, cs_value in override['ContainerSpec'].items():
+ if cs_value is not None:
+ merged['ContainerSpec'][cs_key] = cs_value
+ elif ts_value is not None:
+ merged[ts_key] = ts_value
+ return merged
class ServiceApiMixin(object):
@@ -78,14 +127,10 @@ class ServiceApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if endpoint_config is not None:
- warnings.warn(
- 'endpoint_config has been renamed to endpoint_spec.',
- DeprecationWarning
- )
- endpoint_spec = endpoint_config
- _check_api_features(self._version, task_template, update_config)
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec
+ )
url = self._url('/services/create')
headers = {}
@@ -101,6 +146,8 @@ class ServiceApiMixin(object):
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
+ if utils.version_lt(self._version, '1.25'):
+ networks = networks or task_template.pop('Networks', None)
data = {
'Name': name,
'Labels': labels,
@@ -119,12 +166,14 @@ class ServiceApiMixin(object):
@utils.minimum_version('1.24')
@utils.check_resource('service')
- def inspect_service(self, service):
+ def inspect_service(self, service, insert_defaults=None):
"""
Return information about a service.
Args:
- service (str): Service name or ID
+ service (str): Service name or ID.
+ insert_defaults (boolean): If true, default values will be merged
+ into the service inspect output.
Returns:
``True`` if successful.
@@ -134,7 +183,15 @@ class ServiceApiMixin(object):
If the server returns an error.
"""
url = self._url('/services/{0}', service)
- return self._result(self._get(url), True)
+ params = {}
+ if insert_defaults is not None:
+ if utils.version_lt(self._version, '1.29'):
+ raise errors.InvalidVersion(
+ 'insert_defaults is not supported in API version < 1.29'
+ )
+ params['insertDefaults'] = insert_defaults
+
+ return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('task')
@@ -184,7 +241,8 @@ class ServiceApiMixin(object):
Args:
filters (dict): Filters to process on the nodes list. Valid
- filters: ``id`` and ``name``. Default: ``None``.
+ filters: ``id``, ``name``, ``label`` and ``mode``.
+ Default: ``None``.
Returns:
A list of dictionaries containing data about each service.
@@ -278,7 +336,7 @@ class ServiceApiMixin(object):
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
- endpoint_spec=None):
+ endpoint_spec=None, fetch_current_spec=False):
"""
Update a service.
@@ -300,6 +358,8 @@ class ServiceApiMixin(object):
the service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
+ fetch_current_spec (boolean): Use the undefined settings from the
+ current specification of the service. Default: ``False``
Returns:
``True`` if successful.
@@ -308,41 +368,74 @@ class ServiceApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if endpoint_config is not None:
- warnings.warn(
- 'endpoint_config has been renamed to endpoint_spec.',
- DeprecationWarning
- )
- endpoint_spec = endpoint_config
- _check_api_features(self._version, task_template, update_config)
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec
+ )
+
+ if fetch_current_spec:
+ inspect_defaults = True
+ if utils.version_lt(self._version, '1.29'):
+ inspect_defaults = None
+ current = self.inspect_service(
+ service, insert_defaults=inspect_defaults
+ )['Spec']
+
+ else:
+ current = {}
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
- if name is not None:
- data['Name'] = name
- if labels is not None:
- data['Labels'] = labels
+
+ data['Name'] = current.get('Name') if name is None else name
+
+ data['Labels'] = current.get('Labels') if labels is None else labels
+
if mode is not None:
if not isinstance(mode, dict):
mode = ServiceMode(mode)
data['Mode'] = mode
- if task_template is not None:
- image = task_template.get('ContainerSpec', {}).get('Image', None)
- if image is not None:
- registry, repo_name = auth.resolve_repository_name(image)
- auth_header = auth.get_config_header(self, registry)
- if auth_header:
- headers['X-Registry-Auth'] = auth_header
- data['TaskTemplate'] = task_template
+ else:
+ data['Mode'] = current.get('Mode')
+
+ data['TaskTemplate'] = _merge_task_template(
+ current.get('TaskTemplate', {}), task_template
+ )
+
+ container_spec = data['TaskTemplate'].get('ContainerSpec', {})
+ image = container_spec.get('Image', None)
+ if image is not None:
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+
if update_config is not None:
data['UpdateConfig'] = update_config
+ else:
+ data['UpdateConfig'] = current.get('UpdateConfig')
if networks is not None:
- data['Networks'] = utils.convert_service_networks(networks)
+ converted_networks = utils.convert_service_networks(networks)
+ if utils.version_lt(self._version, '1.25'):
+ data['Networks'] = converted_networks
+ else:
+ data['TaskTemplate']['Networks'] = converted_networks
+ elif utils.version_lt(self._version, '1.25'):
+ data['Networks'] = current.get('Networks')
+ elif data['TaskTemplate'].get('Networks') is None:
+ current_task_template = current.get('TaskTemplate', {})
+ current_networks = current_task_template.get('Networks')
+ if current_networks is None:
+ current_networks = current.get('Networks')
+ if current_networks is not None:
+ data['TaskTemplate']['Networks'] = current_networks
+
if endpoint_spec is not None:
data['EndpointSpec'] = endpoint_spec
+ else:
+ data['EndpointSpec'] = current.get('EndpointSpec')
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 4fa0c4a..04595da 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,7 +1,9 @@
import logging
from six.moves import http_client
+from .. import errors
from .. import types
from .. import utils
+
log = logging.getLogger(__name__)
@@ -9,8 +11,8 @@ class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
"""
- Create a ``docker.types.SwarmSpec`` instance that can be used as the
- ``swarm_spec`` argument in
+ Create a :py:class:`docker.types.SwarmSpec` instance that can be used
+ as the ``swarm_spec`` argument in
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
Args:
@@ -29,13 +31,25 @@ class SwarmApiMixin(object):
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
- external_ca (dict): Configuration for forwarding signing requests
- to an external certificate authority. Use
- ``docker.types.SwarmExternalCA``.
+ external_cas (:py:class:`list`): Configuration for forwarding
+ signing requests to an external certificate authority. Use
+ a list of :py:class:`docker.types.SwarmExternalCA`.
name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer whose purpose is to force swarm
+ to generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
Returns:
- ``docker.types.SwarmSpec`` instance.
+ :py:class:`docker.types.SwarmSpec`
Raises:
:py:class:`docker.errors.APIError`
@@ -51,7 +65,20 @@ class SwarmApiMixin(object):
force_new_cluster=False, swarm_spec=spec
)
"""
- return types.SwarmSpec(*args, **kwargs)
+ ext_ca = kwargs.pop('external_ca', None)
+ if ext_ca:
+ kwargs['external_cas'] = [ext_ca]
+ return types.SwarmSpec(self._version, *args, **kwargs)
+
+ @utils.minimum_version('1.24')
+ def get_unlock_key(self):
+ """
+ Get the unlock key for this Swarm manager.
+
+ Returns:
+ A ``dict`` containing an ``UnlockKey`` member
+ """
+ return self._result(self._get(self._url('/swarm/unlockkey')), True)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
@@ -137,7 +164,7 @@ class SwarmApiMixin(object):
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
- def join_swarm(self, remote_addrs, join_token, listen_addr=None,
+ def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
advertise_addr=None):
"""
Make this Engine join a swarm that has already been created.
@@ -256,9 +283,45 @@ class SwarmApiMixin(object):
return True
@utils.minimum_version('1.24')
+ def unlock_swarm(self, key):
+ """
+ Unlock a locked swarm.
+
+ Args:
+ key (string): The unlock key as provided by
+ :py:meth:`get_unlock_key`
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the key argument is in an incompatible format
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ `True` if the request was successful.
+
+ Example:
+
+ >>> key = client.get_unlock_key()
+ >>> client.unlock_swarm(key)
+
+ """
+ if isinstance(key, dict):
+ if 'UnlockKey' not in key:
+ raise errors.InvalidArgument('Invalid unlock key format')
+ else:
+ key = {'UnlockKey': key}
+
+ url = self._url('/swarm/unlock')
+ res = self._post_json(url, data=key)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
"""
- Update the Node's configuration
+ Update the node's configuration
Args:
diff --git a/docker/api/volume.py b/docker/api/volume.py
index ce911c8..900a608 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -3,7 +3,6 @@ from .. import utils
class VolumeApiMixin(object):
- @utils.minimum_version('1.21')
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
@@ -37,7 +36,6 @@ class VolumeApiMixin(object):
url = self._url('/volumes')
return self._result(self._get(url, params=params), True)
- @utils.minimum_version('1.21')
def create_volume(self, name=None, driver=None, driver_opts=None,
labels=None):
"""
@@ -90,7 +88,6 @@ class VolumeApiMixin(object):
return self._result(self._post_json(url, data=data), True)
- @utils.minimum_version('1.21')
def inspect_volume(self, name):
"""
Retrieve volume info by name.
@@ -138,7 +135,6 @@ class VolumeApiMixin(object):
url = self._url('/volumes/prune')
return self._result(self._post(url, params=params), True)
- @utils.minimum_version('1.21')
def remove_volume(self, name, force=False):
"""
Remove a volume. Similar to the ``docker volume rm`` command.
diff --git a/docker/auth.py b/docker/auth.py
index c3fb062..48fcd8b 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -1,18 +1,15 @@
import base64
import json
import logging
-import os
import dockerpycreds
import six
from . import errors
-from .constants import IS_WINDOWS_PLATFORM
+from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
-DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
-LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -93,22 +90,26 @@ def resolve_authconfig(authconfig, registry=None):
log.debug(
'Using credentials store "{0}"'.format(store_name)
)
- return _resolve_authconfig_credstore(
+ cfg = _resolve_authconfig_credstore(
authconfig, registry, store_name
)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
- if registry in authconfig:
+ authdict = authconfig.get('auths', {})
+ if registry in authdict:
log.debug("Found {0}".format(repr(registry)))
- return authconfig[registry]
+ return authdict[registry]
- for key, config in six.iteritems(authconfig):
+ for key, conf in six.iteritems(authdict):
if resolve_index_name(key) == registry:
log.debug("Found {0}".format(repr(key)))
- return config
+ return conf
log.debug("No entry found")
return None
@@ -203,7 +204,7 @@ def parse_auth(entries, raise_on_error=False):
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'
+ 'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
@@ -223,45 +224,7 @@ def parse_auth(entries, raise_on_error=False):
return conf
-def find_config_file(config_path=None):
- paths = list(filter(None, [
- config_path, # 1
- config_path_from_environment(), # 2
- os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
- os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
- ]))
-
- log.debug("Trying paths: {0}".format(repr(paths)))
-
- for path in paths:
- if os.path.exists(path):
- log.debug("Found file at path: {0}".format(path))
- return path
-
- log.debug("No config file found")
-
- return None
-
-
-def config_path_from_environment():
- config_dir = os.environ.get('DOCKER_CONFIG')
- if not config_dir:
- return None
- return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
-
-
-def home_dir():
- """
- Get the user's home directory, using the same logic as the Docker Engine
- client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
- """
- if IS_WINDOWS_PLATFORM:
- return os.environ.get('USERPROFILE', '')
- else:
- return os.path.expanduser('~')
-
-
-def load_config(config_path=None):
+def load_config(config_path=None, config_dict=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
@@ -269,39 +232,45 @@ def load_config(config_path=None):
explicit config_path parameter > DOCKER_CONFIG environment variable >
~/.docker/config.json > ~/.dockercfg
"""
- config_file = find_config_file(config_path)
- if not config_file:
- return {}
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return {}
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (IOError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return _load_legacy_config(config_file)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return res
+
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret"
+ "as auth-only file"
+ )
+ return parse_auth(config_dict)
- try:
- with open(config_file) as f:
- data = json.load(f)
- res = {}
- if data.get('auths'):
- log.debug("Found 'auths' section")
- res.update(parse_auth(data['auths'], raise_on_error=True))
- if data.get('HttpHeaders'):
- log.debug("Found 'HttpHeaders' section")
- res.update({'HttpHeaders': data['HttpHeaders']})
- if data.get('credsStore'):
- log.debug("Found 'credsStore' section")
- res.update({'credsStore': data['credsStore']})
- if data.get('credHelpers'):
- log.debug("Found 'credHelpers' section")
- res.update({'credHelpers': data['credHelpers']})
- if res:
- return res
- else:
- log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
- f.seek(0)
- return parse_auth(json.load(f))
- except (IOError, KeyError, ValueError) as e:
- # Likely missing new Docker config file or it's in an
- # unknown format, continue to attempt to read old location
- # and format.
- log.debug(e)
+def _load_legacy_config(config_file):
log.debug("Attempting to parse legacy auth file format")
try:
data = []
diff --git a/docker/client.py b/docker/client.py
index ee361bb..b4364c3 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,5 +1,6 @@
from .api.client import APIClient
from .constants import DEFAULT_TIMEOUT_SECONDS
+from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
from .models.networks import NetworkCollection
@@ -25,7 +26,7 @@ class DockerClient(object):
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.26``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -59,7 +60,7 @@ class DockerClient(object):
Args:
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.26``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
@@ -81,6 +82,14 @@ class DockerClient(object):
# Resources
@property
+ def configs(self):
+ """
+ An object for managing configs on the server. See the
+ :doc:`configs documentation <configs>` for full details.
+ """
+ return ConfigCollection(client=self)
+
+ @property
def containers(self):
"""
An object for managing containers on the server. See the
@@ -177,6 +186,10 @@ class DockerClient(object):
return self.api.version(*args, **kwargs)
version.__doc__ = APIClient.version.__doc__
+ def close(self):
+ return self.api.close()
+ close.__doc__ = APIClient.close.__doc__
+
def __getattr__(self, name):
s = ["'DockerClient' object has no attribute '{}'".format(name)]
# If a user calls a method on APIClient, they
diff --git a/docker/constants.py b/docker/constants.py
index 6de8fad..7565a76 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.30'
+DEFAULT_DOCKER_API_VERSION = '1.35'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@@ -17,3 +17,4 @@ IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
diff --git a/docker/errors.py b/docker/errors.py
index 2a2f871..0253695 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -18,7 +18,7 @@ def create_api_error_from_http_exception(e):
try:
explanation = response.json()['message']
except ValueError:
- explanation = response.content.strip()
+ explanation = (response.content or '').strip()
cls = APIError
if response.status_code == 404:
if explanation and ('No such image' in str(explanation) or
@@ -140,7 +140,14 @@ class StreamParseError(RuntimeError):
self.msg = reason
-class BuildError(Exception):
+class BuildError(DockerException):
+ def __init__(self, reason, build_log):
+ super(BuildError, self).__init__(reason)
+ self.msg = reason
+ self.build_log = build_log
+
+
+class ImageLoadError(DockerException):
pass
diff --git a/docker/models/configs.py b/docker/models/configs.py
new file mode 100644
index 0000000..7f23f65
--- /dev/null
+++ b/docker/models/configs.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Config(Model):
+ """A config."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this config.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If config failed to remove.
+ """
+ return self.client.api.remove_config(self.id)
+
+
+class ConfigCollection(Collection):
+ """Configs on the Docker server."""
+ model = Config
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_config(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_config.__doc__
+
+ def get(self, config_id):
+ """
+ Get a config.
+
+ Args:
+ config_id (str): Config ID.
+
+ Returns:
+ (:py:class:`Config`): The config.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the config does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_config(config_id))
+
+ def list(self, **kwargs):
+ """
+ List configs. Similar to the ``docker config ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Config`): The configs.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.configs(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/containers.py b/docker/models/containers.py
index 688decc..1e06ed6 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -1,8 +1,13 @@
import copy
+import ntpath
+from collections import namedtuple
from ..api import APIClient
-from ..errors import (ContainerError, ImageNotFound,
- create_unexpected_kwargs_error)
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import (
+ ContainerError, DockerException, ImageNotFound,
+ create_unexpected_kwargs_error
+)
from ..types import HostConfig
from ..utils import version_gte
from .images import Image
@@ -24,7 +29,7 @@ class Container(Model):
"""
The image of the container.
"""
- image_id = self.attrs['Image']
+ image_id = self.attrs.get('ImageID', self.attrs['Image'])
if image_id is None:
return None
return self.client.images.get(image_id.split(':')[1])
@@ -34,15 +39,23 @@ class Container(Model):
"""
The labels of a container as dictionary.
"""
- result = self.attrs['Config'].get('Labels')
- return result or {}
+ try:
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+ except KeyError:
+ raise DockerException(
+ 'Label data is not available for sparse objects. Call reload()'
+ ' to retrieve all information'
+ )
@property
def status(self):
"""
The status of the container. For example, ``running``, or ``exited``.
"""
- return self.attrs['State']['Status']
+ if isinstance(self.attrs['State'], dict):
+ return self.attrs['State']['Status']
+ return self.attrs['State']
def attach(self, **kwargs):
"""
@@ -126,7 +139,7 @@ class Container(Model):
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
- socket=False, environment=None):
+ socket=False, environment=None, workdir=None):
"""
Run a command inside this container. Similar to
``docker exec``.
@@ -142,13 +155,22 @@ class Container(Model):
detach (bool): If true, detach from the exec command.
Default: False
stream (bool): Stream response data. Default: False
+ socket (bool): Return the connection socket to allow custom
+ read/write operations. Default: False
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
Returns:
- (generator or str): If ``stream=True``, a generator yielding
- response chunks. A string containing response data otherwise.
+ (ExecResult): A tuple of (exit_code, output)
+ exit_code: (int):
+ Exit code for the executed command or ``None`` if
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator or str):
+ If ``stream=True``, a generator yielding response chunks.
+ If ``socket=True``, a socket object for the connection.
+ A string containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
@@ -156,16 +178,29 @@ class Container(Model):
"""
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
- privileged=privileged, user=user, environment=environment
+ privileged=privileged, user=user, environment=environment,
+ workdir=workdir
)
- return self.client.api.exec_start(
+ exec_output = self.client.api.exec_start(
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
)
+ if socket or stream:
+ return ExecResult(None, exec_output)
+
+ return ExecResult(
+ self.client.api.exec_inspect(resp['Id'])['ExitCode'],
+ exec_output
+ )
- def export(self):
+ def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Export the contents of the container's filesystem as a tar archive.
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
Returns:
(str): The filesystem tar archive
@@ -173,15 +208,18 @@ class Container(Model):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- return self.client.api.export(self.id)
+ return self.client.api.export(self.id, chunk_size)
- def get_archive(self, path):
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
Args:
path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -191,7 +229,7 @@ class Container(Model):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- return self.client.api.get_archive(self.id, path)
+ return self.client.api.get_archive(self.id, path, chunk_size)
def kill(self, signal=None):
"""
@@ -225,6 +263,8 @@ class Container(Model):
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output
+ until (datetime or int): Show logs that occurred before the given
+ datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
@@ -424,10 +464,13 @@ class Container(Model):
Args:
timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
Returns:
- (int): The exit code of the container. Returns ``-1`` if the API
- responds without a ``StatusCode`` attribute.
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
@@ -491,6 +534,8 @@ class ContainerCollection(Collection):
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
@@ -549,8 +594,12 @@ class ContainerCollection(Collection):
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``volumes``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
name (str): The name for this container.
- nano_cpus (int): CPU quota in units of 10-9 CPUs.
+ nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected
to at creation time. You can connect to additional networks
using :py:meth:`Network.connect`. Incompatible with
@@ -564,6 +613,7 @@ class ContainerCollection(Collection):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
@@ -572,6 +622,8 @@ class ContainerCollection(Collection):
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ Only used if the method needs to pull the requested image.
ports (dict): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the
@@ -622,6 +674,9 @@ class ContainerCollection(Collection):
(e.g. ``SIGINT``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
+ stream (bool): If true and ``detach`` is false, return a log
+ generator instead of a string. Ignored if ``detach`` is true.
+ Default: ``False``.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
@@ -689,7 +744,10 @@ class ContainerCollection(Collection):
"""
if isinstance(image, Image):
image = image.id
- detach = kwargs.pop("detach", False)
+ stream = kwargs.pop('stream', False)
+ detach = kwargs.pop('detach', False)
+ platform = kwargs.pop('platform', None)
+
if detach and remove:
if version_gte(self.client.api._version, '1.25'):
kwargs["auto_remove"] = True
@@ -707,7 +765,7 @@ class ContainerCollection(Collection):
container = self.create(image=image, command=command,
detach=detach, **kwargs)
except ImageNotFound:
- self.client.images.pull(image)
+ self.client.images.pull(image, platform=platform)
container = self.create(image=image, command=command,
detach=detach, **kwargs)
@@ -716,23 +774,30 @@ class ContainerCollection(Collection):
if detach:
return container
- exit_status = container.wait()
- if exit_status != 0:
- stdout = False
- stderr = True
-
logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
+ out = None
if logging_driver == 'json-file' or logging_driver == 'journald':
- out = container.logs(stdout=stdout, stderr=stderr)
- else:
+ out = container.logs(
+ stdout=stdout, stderr=stderr, stream=True, follow=True
+ )
+
+ exit_status = container.wait()['StatusCode']
+ if exit_status != 0:
out = None
+ if not kwargs.get('auto_remove'):
+ out = container.logs(stdout=False, stderr=True)
if remove:
container.remove()
if exit_status != 0:
- raise ContainerError(container, exit_status, command, image, out)
- return out
+ raise ContainerError(
+ container, exit_status, command, image, out
+ )
+
+ return out if stream or out is None else b''.join(
+ [line for line in out]
+ )
def create(self, image, command=None, **kwargs):
"""
@@ -778,7 +843,8 @@ class ContainerCollection(Collection):
resp = self.client.api.inspect_container(container_id)
return self.prepare_model(resp)
- def list(self, all=False, before=None, filters=None, limit=-1, since=None):
+ def list(self, all=False, before=None, filters=None, limit=-1, since=None,
+ sparse=False):
"""
List containers. Similar to the ``docker ps`` command.
@@ -812,6 +878,11 @@ class ContainerCollection(Collection):
`docker ps
<https://docs.docker.com/engine/reference/commandline/ps>`_.
+ sparse (bool): Do not inspect containers. Returns partial
+ information, but guaranteed not to block. Use
+ :py:meth:`Container.reload` on resulting objects to retrieve
+ all attributes. Default: ``False``
+
Returns:
(list of :py:class:`Container`)
@@ -822,7 +893,10 @@ class ContainerCollection(Collection):
resp = self.client.api.containers(all=all, before=before,
filters=filters, limit=limit,
since=since)
- return [self.get(r['Id']) for r in resp]
+ if sparse:
+ return [self.prepare_model(r) for r in resp]
+ else:
+ return [self.get(r['Id']) for r in resp]
def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters)
@@ -866,6 +940,9 @@ RUN_HOST_CONFIG_KWARGS = [
'cpu_shares',
'cpuset_cpus',
'cpuset_mems',
+ 'cpu_rt_period',
+ 'cpu_rt_runtime',
+ 'device_cgroup_rules',
'device_read_bps',
'device_read_iops',
'device_write_bps',
@@ -888,6 +965,7 @@ RUN_HOST_CONFIG_KWARGS = [
'mem_reservation',
'mem_swappiness',
'memswap_limit',
+ 'mounts',
'nano_cpus',
'network_mode',
'oom_kill_disable',
@@ -952,17 +1030,27 @@ def _create_container_args(kwargs):
# sort to make consistent for tests
create_kwargs['ports'] = [tuple(p.split('/', 1))
for p in sorted(port_bindings.keys())]
- binds = create_kwargs['host_config'].get('Binds')
- if binds:
- create_kwargs['volumes'] = [_host_volume_from_bind(v) for v in binds]
+ if volumes:
+ if isinstance(volumes, dict):
+ create_kwargs['volumes'] = [
+ v.get('bind') for v in volumes.values()
+ ]
+ else:
+ create_kwargs['volumes'] = [
+ _host_volume_from_bind(v) for v in volumes
+ ]
return create_kwargs
def _host_volume_from_bind(bind):
- bits = bind.split(':')
- if len(bits) == 1:
- return bits[0]
- elif len(bits) == 2 and bits[1] in ('ro', 'rw'):
- return bits[0]
+ drive, rest = ntpath.splitdrive(bind)
+ bits = rest.split(':', 1)
+ if len(bits) == 1 or bits[1] in ('ro', 'rw'):
+ return drive + bits[0]
else:
- return bits[1]
+ return bits[1].rstrip(':ro').rstrip(':rw')
+
+
+ExecResult = namedtuple('ExecResult', 'exit_code,output')
+""" A result of Container.exec_run with the properties ``exit_code`` and
+ ``output``. """
diff --git a/docker/models/images.py b/docker/models/images.py
index d1b29ad..d4893bb 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -1,9 +1,12 @@
+import itertools
import re
import six
from ..api import APIClient
-from ..errors import BuildError
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import BuildError, ImageLoadError, InvalidArgument
+from ..utils import parse_repository_tag
from ..utils.json_stream import json_stream
from .resource import Collection, Model
@@ -56,13 +59,17 @@ class Image(Model):
"""
return self.client.api.history(self.id)
- def save(self):
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
Returns:
- (urllib3.response.HTTPResponse object): The response from the
- daemon.
+ (generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
@@ -70,14 +77,13 @@ class Image(Model):
Example:
- >>> image = cli.images.get("fedora:latest")
- >>> resp = image.save()
- >>> f = open('/tmp/fedora-latest.tar', 'w')
- >>> for chunk in resp.stream():
- >>> f.write(chunk)
+ >>> image = cli.images.get("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image.save():
+ >>> f.write(chunk)
>>> f.close()
"""
- return self.client.api.get_image(self.id)
+ return self.client.api.get_image(self.id, chunk_size)
def tag(self, repository, tag=None, **kwargs):
"""
@@ -99,6 +105,81 @@ class Image(Model):
return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+class RegistryData(Model):
+ """
+ Image metadata stored on the registry, including available platforms.
+ """
+ def __init__(self, image_name, *args, **kwargs):
+ super(RegistryData, self).__init__(*args, **kwargs)
+ self.image_name = image_name
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs['Descriptor']['digest']
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ return self.id[:17]
+
+ def pull(self, platform=None):
+ """
+ Pull the image digest.
+
+ Args:
+ platform (str): The platform to pull the image for.
+ Default: ``None``
+
+ Returns:
+ (:py:class:`Image`): A reference to the pulled image.
+ """
+ repository, _ = parse_repository_tag(self.image_name)
+ return self.collection.pull(repository, tag=self.id, platform=platform)
+
+ def has_platform(self, platform):
+ """
+ Check whether the given platform identifier is available for this
+ digest.
+
+ Args:
+ platform (str or dict): A string using the ``os[/arch[/variant]]``
+ format, or a platform dictionary.
+
+ Returns:
+ (bool): ``True`` if the platform is recognized as available,
+ ``False`` otherwise.
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the platform argument is not a valid descriptor.
+ """
+ if platform and not isinstance(platform, dict):
+ parts = platform.split('/')
+ if len(parts) > 3 or len(parts) < 1:
+ raise InvalidArgument(
+ '"{0}" is not a valid platform descriptor'.format(platform)
+ )
+ platform = {'os': parts[0]}
+ if len(parts) > 2:
+ platform['variant'] = parts[2]
+ if len(parts) > 1:
+ platform['architecture'] = parts[1]
+ return normalize_platform(
+ platform, self.client.version()
+ ) in self.attrs['Platforms']
+
+ def reload(self):
+ self.attrs = self.client.api.inspect_distribution(self.image_name)
+
+ reload.__doc__ = Model.reload.__doc__
+
+
class ImageCollection(Collection):
model = Image
@@ -153,9 +234,18 @@ class ImageCollection(Collection):
Dockerfile
network_mode (str): networking mode for the run commands during
build
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
Returns:
- (:py:class:`Image`): The built image.
+ (tuple): The first item is the :py:class:`Image` object for the
+ image that was build. The second item is a generator of the
+ build logs as JSON-decoded objects.
Raises:
:py:class:`docker.errors.BuildError`
@@ -170,9 +260,10 @@ class ImageCollection(Collection):
return self.get(resp)
last_event = None
image_id = None
- for chunk in json_stream(resp):
+ result_stream, internal_stream = itertools.tee(json_stream(resp))
+ for chunk in internal_stream:
if 'error' in chunk:
- raise BuildError(chunk['error'])
+ raise BuildError(chunk['error'], result_stream)
if 'stream' in chunk:
match = re.search(
r'(^Successfully built |sha256:)([0-9a-f]+)$',
@@ -182,8 +273,8 @@ class ImageCollection(Collection):
image_id = match.group(2)
last_event = chunk
if image_id:
- return self.get(image_id)
- raise BuildError(last_event or 'Unknown')
+ return (self.get(image_id), result_stream)
+ raise BuildError(last_event or 'Unknown', result_stream)
def get(self, name):
"""
@@ -203,6 +294,26 @@ class ImageCollection(Collection):
"""
return self.prepare_model(self.client.api.inspect_image(name))
+ def get_registry_data(self, name):
+ """
+ Gets the registry data for an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`RegistryData`): The data object.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return RegistryData(
+ image_name=name,
+ attrs=self.client.api.inspect_distribution(name),
+ client=self.client,
+ collection=self,
+ )
+
def list(self, name=None, all=False, filters=None):
"""
List images on the server.
@@ -236,18 +347,34 @@ class ImageCollection(Collection):
data (binary): Image data to be loaded.
Returns:
- (generator): Progress output as JSON objects
+ (list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- return self.client.api.load_image(data)
+ resp = self.client.api.load_image(data)
+ images = []
+ for chunk in resp:
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Loaded image ID: |^Loaded image: )(.+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ images.append(image_id)
+ if 'error' in chunk:
+ raise ImageLoadError(chunk['error'])
- def pull(self, name, tag=None, **kwargs):
+ return [self.get(i) for i in images]
+
+ def pull(self, repository, tag=None, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
+ If no tag is specified, all tags from that repository will be
+ pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
@@ -256,14 +383,16 @@ class ImageCollection(Collection):
Args:
repository (str): The repository to pull
tag (str): The tag to pull
- insecure_registry (bool): Use an insecure registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.client.DockerClient.login` has set for
this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
Returns:
- (:py:class:`Image`): The image that has been pulled.
+ (:py:class:`Image` or list): The image that has been pulled.
+ If no ``tag`` was specified, the method will return a list
+ of :py:class:`Image` objects belonging to this repository.
Raises:
:py:class:`docker.errors.APIError`
@@ -271,10 +400,21 @@ class ImageCollection(Collection):
Example:
- >>> image = client.images.pull('busybox')
+ >>> # Pull the image tagged `latest` in the busybox repo
+ >>> image = client.images.pull('busybox:latest')
+
+ >>> # Pull all tags in the busybox repo
+ >>> images = client.images.pull('busybox')
"""
- self.client.api.pull(name, tag=tag, **kwargs)
- return self.get('{0}:{1}'.format(name, tag) if tag else name)
+ if not tag:
+ repository, tag = parse_repository_tag(repository)
+
+ self.client.api.pull(repository, tag=tag, **kwargs)
+ if tag:
+ return self.get('{0}{2}{1}'.format(
+ repository, tag, '@' if tag.startswith('sha256:') else ':'
+ ))
+ return self.list(repository)
def push(self, repository, tag=None, **kwargs):
return self.client.api.push(repository, tag=tag, **kwargs)
@@ -291,3 +431,13 @@ class ImageCollection(Collection):
def prune(self, filters=None):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
+
+
+def normalize_platform(platform, engine_info):
+ if platform is None:
+ platform = {}
+ if 'os' not in platform:
+ platform['os'] = engine_info['Os']
+ if 'architecture' not in platform:
+ platform['architecture'] = engine_info['Arch']
+ return platform
diff --git a/docker/models/networks.py b/docker/models/networks.py
index afb0ebe..1c2fbf2 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -1,4 +1,5 @@
from ..api import APIClient
+from ..utils import version_gte
from .containers import Container
from .resource import Model, Collection
@@ -102,15 +103,19 @@ class NetworkCollection(Collection):
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
- ipam (dict): Optional custom IP scheme for the network.
- Created with :py:class:`~docker.types.IPAMConfig`.
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
- same name. Default: ``True``.
+ same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
@@ -149,12 +154,16 @@ class NetworkCollection(Collection):
resp = self.client.api.create_network(name, *args, **kwargs)
return self.get(resp['Id'])
- def get(self, network_id):
+ def get(self, network_id, *args, **kwargs):
"""
Get a network by its ID.
Args:
network_id (str): The ID of the network.
+ verbose (bool): Retrieve the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
Returns:
(:py:class:`Network`) The network.
@@ -167,7 +176,9 @@ class NetworkCollection(Collection):
If the server returns an error.
"""
- return self.prepare_model(self.client.api.inspect_network(network_id))
+ return self.prepare_model(
+ self.client.api.inspect_network(network_id, *args, **kwargs)
+ )
def list(self, *args, **kwargs):
"""
@@ -176,6 +187,13 @@ class NetworkCollection(Collection):
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+ greedy (bool): Fetch more details for each network individually.
+ You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
@@ -184,8 +202,13 @@ class NetworkCollection(Collection):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ greedy = kwargs.pop('greedy', False)
resp = self.client.api.networks(*args, **kwargs)
- return [self.prepare_model(item) for item in resp]
+ networks = [self.prepare_model(item) for item in resp]
+ if greedy and version_gte(self.client.api._version, '1.28'):
+ for net in networks:
+ net.reload()
+ return networks
def prune(self, filters=None):
self.client.api.prune_networks(filters=filters)
diff --git a/docker/models/services.py b/docker/models/services.py
index e1e2ea6..125896b 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -1,6 +1,6 @@
import copy
-from docker.errors import create_unexpected_kwargs_error
-from docker.types import TaskTemplate, ContainerSpec
+from docker.errors import create_unexpected_kwargs_error, InvalidArgument
+from docker.types import TaskTemplate, ContainerSpec, ServiceMode
from .resource import Model, Collection
@@ -69,6 +69,11 @@ class Service(Model):
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']
+ if kwargs.get('force_update') is True:
+ task_template = self.attrs['Spec']['TaskTemplate']
+ current_value = int(task_template.get('ForceUpdate', 0))
+ kwargs['force_update'] = current_value + 1
+
create_kwargs = _get_create_service_kwargs('update', kwargs)
return self.client.api.update_service(
@@ -105,6 +110,35 @@ class Service(Model):
)
return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
+ def scale(self, replicas):
+ """
+ Scale service container.
+
+ Args:
+ replicas (int): The number of containers that should be running.
+
+ Returns:
+ ``True`` if successful.
+ """
+
+ if 'Global' in self.attrs['Spec']['Mode'].keys():
+ raise InvalidArgument('Cannot scale a global container')
+
+ service_mode = ServiceMode('replicated', replicas)
+ return self.client.api.update_service(self.id, self.version,
+ service_mode,
+ fetch_current_spec=True)
+
+ def force_update(self):
+ """
+ Force update the service even if no changes require it.
+
+ Returns:
+ ``True`` if successful.
+ """
+
+ return self.update(force_update=True, fetch_current_spec=True)
+
class ServiceCollection(Collection):
"""Services on the Docker server."""
@@ -125,6 +159,8 @@ class ServiceCollection(Collection):
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
labels (dict): Labels to apply to the service.
log_driver (str): Log driver to use for containers.
log_driver_options (dict): Log driver options.
@@ -147,6 +183,22 @@ class ServiceCollection(Collection):
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's `hosts` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference`
+ that will be exposed to the service.
+ privileges (Privileges): Security options for the service's
+ containers.
Returns:
(:py:class:`Service`) The created service.
@@ -161,12 +213,14 @@ class ServiceCollection(Collection):
service_id = self.client.api.create_service(**create_kwargs)
return self.get(service_id)
- def get(self, service_id):
+ def get(self, service_id, insert_defaults=None):
"""
Get a service.
Args:
service_id (str): The ID of the service.
+ insert_defaults (boolean): If true, default values will be merged
+ into the output.
Returns:
(:py:class:`Service`): The service.
@@ -176,8 +230,13 @@ class ServiceCollection(Collection):
If the service does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
+ :py:class:`docker.errors.InvalidVersion`
+ If one of the arguments is not supported with the current
+ API version.
"""
- return self.prepare_model(self.client.api.inspect_service(service_id))
+ return self.prepare_model(
+ self.client.api.inspect_service(service_id, insert_defaults)
+ )
def list(self, **kwargs):
"""
@@ -185,7 +244,8 @@ class ServiceCollection(Collection):
Args:
filters (dict): Filters to process on the nodes list. Valid
- filters: ``id`` and ``name``. Default: ``None``.
+ filters: ``id``, ``name`` , ``label`` and ``mode``.
+ Default: ``None``.
Returns:
(list of :py:class:`Service`): The services.
@@ -202,22 +262,33 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
- 'image',
- 'command',
'args',
+ 'command',
+ 'configs',
+ 'dns_config',
'env',
+ 'groups',
+ 'healthcheck',
'hostname',
- 'workdir',
- 'user',
+ 'hosts',
+ 'image',
+ 'isolation',
'labels',
'mounts',
- 'stop_grace_period',
+ 'open_stdin',
+ 'privileges',
+ 'read_only',
'secrets',
- 'tty'
+ 'stop_grace_period',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'workdir',
]
# kwargs to copy straight over to TaskTemplate
TASK_TEMPLATE_KWARGS = [
+ 'networks',
'resources',
'restart_policy',
]
@@ -228,7 +299,6 @@ CREATE_SERVICE_KWARGS = [
'labels',
'mode',
'update_config',
- 'networks',
'endpoint_spec',
]
@@ -262,6 +332,15 @@ def _get_create_service_kwargs(func_name, kwargs):
'Options': kwargs.pop('log_driver_options', {})
}
+ if func_name == 'update':
+ if 'force_update' in kwargs:
+ task_template_kwargs['force_update'] = kwargs.pop('force_update')
+
+ # fetch the current spec by default if updating the service
+ # through the model
+ fetch_current_spec = kwargs.pop('fetch_current_spec', True)
+ create_kwargs['fetch_current_spec'] = fetch_current_spec
+
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index d3d07ee..7396e73 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -1,6 +1,5 @@
from docker.api import APIClient
from docker.errors import APIError
-from docker.types import SwarmSpec
from .resource import Model
@@ -9,6 +8,8 @@ class Swarm(Model):
The server's Swarm state. This a singleton that must be reloaded to get
the current state of the Swarm.
"""
+ id_attribute = 'ID'
+
def __init__(self, *args, **kwargs):
super(Swarm, self).__init__(*args, **kwargs)
if self.client:
@@ -28,6 +29,10 @@ class Swarm(Model):
"""
return self.attrs.get('Version').get('Index')
+ def get_unlock_key(self):
+ return self.client.api.get_unlock_key()
+ get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
+
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, **kwargs):
"""
@@ -70,6 +75,18 @@ class Swarm(Model):
to an external certificate authority. Use
``docker.types.SwarmExternalCA``.
name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer whose purpose is to force swarm
+ to generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
Returns:
``True`` if the request went through.
@@ -92,7 +109,7 @@ class Swarm(Model):
'listen_addr': listen_addr,
'force_new_cluster': force_new_cluster
}
- init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
+ init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
self.client.api.init_swarm(**init_kwargs)
self.reload()
@@ -115,6 +132,10 @@ class Swarm(Model):
"""
self.attrs = self.client.api.inspect_swarm()
+ def unlock(self, key):
+ return self.client.api.unlock_swarm(key)
+ unlock.__doc__ = APIClient.unlock_swarm.__doc__
+
def update(self, rotate_worker_token=False, rotate_manager_token=False,
**kwargs):
"""
@@ -141,7 +162,7 @@ class Swarm(Model):
return self.client.api.update_swarm(
version=self.version,
- swarm_spec=SwarmSpec(**kwargs),
+ swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token,
rotate_manager_token=rotate_manager_token
)
diff --git a/docker/tls.py b/docker/tls.py
index 6488bbc..4900e9f 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -37,13 +37,33 @@ class TLSConfig(object):
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
- # ways: https://github.com/docker/docker-py/issues/963
-
- self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
-
- # "tls" and "tls_verify" must have both or neither cert/key files
- # In either case, Alert the user when both are expected, but any are
+ # TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is
+ # deprecated, and it's recommended to use OP_NO_TLSvWhatever instead
+ # to exclude versions. But I think that might require a bigger
+ # architectural change, so I've opted not to pursue it at this time
+
+ # If the user provides an SSL version, we should use their preference
+ if ssl_version:
+ self.ssl_version = ssl_version
+ else:
+ # If the user provides no ssl version, we should default to
+ # TLSv1_2. This option is the most secure, and will work for the
+ # majority of users with reasonably up-to-date software. However,
+ # before doing so, detect openssl version to ensure we can support
+ # it.
+ if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
+ ssl, 'PROTOCOL_TLSv1_2'):
+ # If the OpenSSL version is high enough to support TLSv1_2,
+ # then we should use it.
+ self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
+ else:
+ # Otherwise, TLS v1.0 seems to be the safest default;
+ # SSLv23 fails in mysterious ways:
+ # https://github.com/docker/docker-py/issues/963
+ self.ssl_version = ssl.PROTOCOL_TLSv1
+
+ # "tls" and "tls_verify" must have both or neither cert/key files In
+ # either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index 3565cfb..cc35d00 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -18,7 +18,19 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+class UnixHTTPResponse(httplib.HTTPResponse, object):
+ def __init__(self, sock, *args, **kwargs):
+ disable_buffering = kwargs.pop('disable_buffering', False)
+ if six.PY2:
+ # FIXME: We may need to disable buffering on Py3 as well,
+ # but there's no clear way to do it at the moment. See:
+ # https://github.com/docker/docker-py/issues/1799
+ kwargs['buffering'] = not disable_buffering
+ super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
+
+
class UnixHTTPConnection(httplib.HTTPConnection, object):
+
def __init__(self, base_url, unix_socket, timeout=60):
super(UnixHTTPConnection, self).__init__(
'localhost', timeout=timeout
@@ -26,6 +38,7 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
+ self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -33,6 +46,17 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
sock.connect(self.unix_socket)
self.sock = sock
+ def putheader(self, header, *values):
+ super(UnixHTTPConnection, self).putheader(header, *values)
+ if header == 'Connection' and 'Upgrade' in values:
+ self.disable_buffering = True
+
+ def response_class(self, sock, *args, **kwargs):
+ if self.disable_buffering:
+ kwargs['disable_buffering'] = True
+
+ return UnixHTTPResponse(sock, *args, **kwargs)
+
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index edc919d..0b0d847 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,9 +1,11 @@
# flake8: noqa
from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
- ContainerSpec, DriverConfig, EndpointSpec, Mount, Placement, Resources,
- RestartPolicy, SecretReference, ServiceMode, TaskTemplate, UpdateConfig
+ ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
+ Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
+ ServiceMode, TaskTemplate, UpdateConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 030e292..2521420 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,11 +1,10 @@
import six
-import warnings
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
- format_environment, normalize_links, parse_bytes, parse_devices,
- split_command, version_gte, version_lt,
+ format_environment, format_extra_hosts, normalize_links, parse_bytes,
+ parse_devices, split_command, version_gte, version_lt,
)
from .base import DictType
from .healthcheck import Healthcheck
@@ -120,7 +119,9 @@ class HostConfig(dict):
isolation=None, auto_remove=False, storage_opt=None,
init=None, init_path=None, volume_driver=None,
cpu_count=None, cpu_percent=None, nano_cpus=None,
- cpuset_mems=None, runtime=None):
+ cpuset_mems=None, runtime=None, mounts=None,
+ cpu_rt_period=None, cpu_rt_runtime=None,
+ device_cgroup_rules=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@@ -129,20 +130,12 @@ class HostConfig(dict):
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
- if version_lt(version, '1.21'):
- raise host_config_version_error('mem_reservation', '1.21')
-
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
- if version_lt(version, '1.21'):
- raise host_config_version_error('kernel_memory', '1.21')
-
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
- if version_lt(version, '1.20'):
- raise host_config_version_error('mem_swappiness', '1.20')
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
@@ -168,9 +161,6 @@ class HostConfig(dict):
self['Privileged'] = privileged
if oom_kill_disable:
- if version_lt(version, '1.20'):
- raise host_config_version_error('oom_kill_disable', '1.19')
-
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
@@ -193,7 +183,7 @@ class HostConfig(dict):
if network_mode:
self['NetworkMode'] = network_mode
- elif network_mode is None and version_gte(version, '1.20'):
+ elif network_mode is None:
self['NetworkMode'] = 'default'
if restart_policy:
@@ -214,18 +204,12 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices)
if group_add:
- if version_lt(version, '1.20'):
- raise host_config_version_error('group_add', '1.20')
-
self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
- if version_lt(version, '1.21'):
- raise host_config_version_error('dns_opt', '1.21')
-
self['DnsOptions'] = dns_opt
if security_opt is not None:
@@ -257,10 +241,7 @@ class HostConfig(dict):
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
- extra_hosts = [
- '{0}:{1}'.format(k, v)
- for k, v in sorted(six.iteritems(extra_hosts))
- ]
+ extra_hosts = format_extra_hosts(extra_hosts)
self['ExtraHosts'] = extra_hosts
@@ -301,44 +282,49 @@ class HostConfig(dict):
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_quota', '1.19')
-
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_period', '1.19')
-
self['CpuPeriod'] = cpu_period
if cpu_shares:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpu_shares', '1.18')
-
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpuset_cpus', '1.18')
-
self['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpuset_mems', '1.19')
-
if not isinstance(cpuset_mems, str):
raise host_config_type_error(
'cpuset_mems', cpuset_mems, 'str'
)
self['CpusetMems'] = cpuset_mems
+ if cpu_rt_period:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_period', '1.25')
+
+ if not isinstance(cpu_rt_period, int):
+ raise host_config_type_error(
+ 'cpu_rt_period', cpu_rt_period, 'int'
+ )
+ self['CPURealtimePeriod'] = cpu_rt_period
+
+ if cpu_rt_runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_runtime', '1.25')
+
+ if not isinstance(cpu_rt_runtime, int):
+ raise host_config_type_error(
+ 'cpu_rt_runtime', cpu_rt_runtime, 'int'
+ )
+ self['CPURealtimeRuntime'] = cpu_rt_runtime
+
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
@@ -445,8 +431,6 @@ class HostConfig(dict):
self['InitPath'] = init_path
if volume_driver is not None:
- if version_lt(version, '1.21'):
- raise host_config_version_error('volume_driver', '1.21')
self['VolumeDriver'] = volume_driver
if cpu_count:
@@ -478,6 +462,20 @@ class HostConfig(dict):
raise host_config_version_error('runtime', '1.25')
self['Runtime'] = runtime
+ if mounts is not None:
+ if version_lt(version, '1.30'):
+ raise host_config_version_error('mounts', '1.30')
+ self['Mounts'] = mounts
+
+ if device_cgroup_rules is not None:
+ if version_lt(version, '1.28'):
+ raise host_config_version_error('device_cgroup_rules', '1.28')
+ if not isinstance(device_cgroup_rules, list):
+ raise host_config_type_error(
+ 'device_cgroup_rules', device_cgroup_rules, 'list'
+ )
+ self['DeviceCgroupRules'] = device_cgroup_rules
+
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
@@ -498,67 +496,12 @@ def host_config_value_error(param, param_value):
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
- stdin_open=False, tty=False, mem_limit=None, ports=None, dns=None,
- environment=None, volumes=None, volumes_from=None,
- network_disabled=False, entrypoint=None, cpu_shares=None,
- working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
- host_config=None, mac_address=None, labels=None, volume_driver=None,
- stop_signal=None, networking_config=None, healthcheck=None,
- stop_timeout=None, runtime=None
+ stdin_open=False, tty=False, ports=None, environment=None,
+ volumes=None, network_disabled=False, entrypoint=None,
+ working_dir=None, domainname=None, host_config=None, mac_address=None,
+ labels=None, stop_signal=None, networking_config=None,
+ healthcheck=None, stop_timeout=None, runtime=None
):
- if version_gte(version, '1.10'):
- message = ('{0!r} parameter has no effect on create_container().'
- ' It has been moved to host_config')
- if dns is not None:
- raise errors.InvalidVersion(message.format('dns'))
- if volumes_from is not None:
- raise errors.InvalidVersion(message.format('volumes_from'))
-
- if version_lt(version, '1.18'):
- if labels is not None:
- raise errors.InvalidVersion(
- 'labels were only introduced in API version 1.18'
- )
- else:
- if cpuset is not None or cpu_shares is not None:
- warnings.warn(
- 'The cpuset_cpus and cpu_shares options have been moved to'
- ' host_config in API version 1.18, and will be removed',
- DeprecationWarning
- )
-
- if version_lt(version, '1.19'):
- if volume_driver is not None:
- raise errors.InvalidVersion(
- 'Volume drivers were only introduced in API version 1.19'
- )
- mem_limit = mem_limit if mem_limit is not None else 0
- memswap_limit = memswap_limit if memswap_limit is not None else 0
- else:
- if mem_limit is not None:
- raise errors.InvalidVersion(
- 'mem_limit has been moved to host_config in API version'
- ' 1.19'
- )
-
- if memswap_limit is not None:
- raise errors.InvalidVersion(
- 'memswap_limit has been moved to host_config in API '
- 'version 1.19'
- )
-
- if version_lt(version, '1.21'):
- if stop_signal is not None:
- raise errors.InvalidVersion(
- 'stop_signal was only introduced in API version 1.21'
- )
- else:
- if volume_driver is not None:
- warnings.warn(
- 'The volume_driver option has been moved to'
- ' host_config in API version 1.21, and will be removed',
- DeprecationWarning
- )
if stop_timeout is not None and version_lt(version, '1.25'):
raise errors.InvalidVersion(
@@ -589,12 +532,6 @@ class ContainerConfig(dict):
if isinstance(labels, list):
labels = dict((lbl, six.text_type('')) for lbl in labels)
- if mem_limit is not None:
- mem_limit = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- memswap_limit = parse_bytes(memswap_limit)
-
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
@@ -616,13 +553,6 @@ class ContainerConfig(dict):
volumes_dict[vol] = {}
volumes = volumes_dict
- if volumes_from:
- if not isinstance(volumes_from, six.string_types):
- volumes_from = ','.join(volumes_from)
- else:
- # Force None, an empty list or dict causes client.start to fail
- volumes_from = None
-
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
@@ -647,28 +577,20 @@ class ContainerConfig(dict):
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
- 'Memory': mem_limit,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
- 'Dns': dns,
'Image': image,
'Volumes': volumes,
- 'VolumesFrom': volumes_from,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
- 'CpuShares': cpu_shares,
- 'Cpuset': cpuset,
- 'CpusetCpus': cpuset,
'WorkingDir': working_dir,
- 'MemorySwap': memswap_limit,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
- 'VolumeDriver': volume_driver,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
'StopTimeout': stop_timeout,
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
new file mode 100644
index 0000000..852f3d8
--- /dev/null
+++ b/docker/types/daemon.py
@@ -0,0 +1,62 @@
+import socket
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+
+class CancellableStream(object):
+ """
+ Stream wrapper for real-time events, logs, etc. from the server.
+
+ Example:
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ def __init__(self, stream, response):
+ self._stream = stream
+ self._response = response
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self._stream)
+ except urllib3.exceptions.ProtocolError:
+ raise StopIteration
+ except socket.error:
+ raise StopIteration
+
+ next = __next__
+
+ def close(self):
+ """
+ Closes the event streaming.
+ """
+
+ if not self._response.raw.closed:
+ # find the underlying socket object
+ # based on api.client._get_raw_response_socket
+
+ sock_fp = self._response.raw._fp.fp
+
+ if hasattr(sock_fp, 'raw'):
+ sock_raw = sock_fp.raw
+
+ if hasattr(sock_raw, 'sock'):
+ sock = sock_raw.sock
+
+ elif hasattr(sock_raw, '_sock'):
+ sock = sock_raw._sock
+
+ else:
+ sock = sock_fp._sock
+
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
index 8ea9a35..61857c2 100644
--- a/docker/types/healthcheck.py
+++ b/docker/types/healthcheck.py
@@ -4,6 +4,31 @@ import six
class Healthcheck(DictType):
+ """
+ Defines a healthcheck configuration for a container or service.
+
+ Args:
+ test (:py:class:`list` or str): Test to perform to determine
+ container health. Possible values:
+
+ - Empty list: Inherit healthcheck from parent image
+ - ``["NONE"]``: Disable healthcheck
+ - ``["CMD", args...]``: exec arguments directly.
+ - ``["CMD-SHELL", command]``: Run command in the system's
+ default shell.
+
+ If a string is provided, it will be used as a ``CMD-SHELL``
+ command.
+ interval (int): The time to wait between checks in nanoseconds. It
+ should be 0 or at least 1000000 (1 ms).
+ timeout (int): The time to wait before considering the check to
+ have hung. It should be 0 or at least 1000000 (1 ms).
+ retries (integer): The number of consecutive failures needed to
+ consider a container as unhealthy.
+ start_period (integer): Start period for the container to
+ initialize before starting health-retries countdown in
+ nanoseconds. It should be 0 or at least 1000000 (1 ms).
+ """
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
if isinstance(test, six.string_types):
diff --git a/docker/types/services.py b/docker/types/services.py
index 8411b70..09eb05e 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -2,7 +2,10 @@ import six
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
-from ..utils import check_resource, format_environment, split_command
+from ..utils import (
+ check_resource, format_environment, format_extra_hosts, parse_bytes,
+ split_command, convert_service_networks,
+)
class TaskTemplate(dict):
@@ -23,11 +26,14 @@ class TaskTemplate(dict):
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the containers to.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
"""
def __init__(self, container_spec, resources=None, restart_policy=None,
- placement=None, log_driver=None, force_update=None):
+ placement=None, log_driver=None, networks=None,
+ force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
@@ -39,6 +45,8 @@ class TaskTemplate(dict):
self['Placement'] = placement
if log_driver:
self['LogDriver'] = log_driver
+ if networks:
+ self['Networks'] = convert_service_networks(networks)
if force_update is not None:
if not isinstance(force_update, int):
@@ -82,13 +90,33 @@ class ContainerSpec(dict):
:py:class:`~docker.types.Mount` class for details.
stop_grace_period (int): Amount of time to wait for the container to
terminate before forcefully killing it.
- secrets (list of py:class:`SecretReference`): List of secrets to be
+ secrets (:py:class:`list`): List of :py:class:`SecretReference` to be
made available inside the containers.
tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's ``hosts`` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference` that
+ will be exposed to the service.
+ privileges (Privileges): Security options for the service's containers.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
- stop_grace_period=None, secrets=None, tty=None):
+ stop_grace_period=None, secrets=None, tty=None, groups=None,
+ open_stdin=None, read_only=None, stop_signal=None,
+ healthcheck=None, hosts=None, dns_config=None, configs=None,
+ privileges=None, isolation=None):
self['Image'] = image
if isinstance(command, six.string_types):
@@ -107,8 +135,17 @@ class ContainerSpec(dict):
self['Dir'] = workdir
if user is not None:
self['User'] = user
+ if groups is not None:
+ self['Groups'] = groups
+ if stop_signal is not None:
+ self['StopSignal'] = stop_signal
+ if stop_grace_period is not None:
+ self['StopGracePeriod'] = stop_grace_period
if labels is not None:
self['Labels'] = labels
+ if hosts is not None:
+ self['Hosts'] = format_extra_hosts(hosts, task=True)
+
if mounts is not None:
parsed_mounts = []
for mount in mounts:
@@ -118,16 +155,33 @@ class ContainerSpec(dict):
# If mount already parsed
parsed_mounts.append(mount)
self['Mounts'] = parsed_mounts
- if stop_grace_period is not None:
- self['StopGracePeriod'] = stop_grace_period
if secrets is not None:
if not isinstance(secrets, list):
raise TypeError('secrets must be a list')
self['Secrets'] = secrets
+ if configs is not None:
+ if not isinstance(configs, list):
+ raise TypeError('configs must be a list')
+ self['Configs'] = configs
+
+ if dns_config is not None:
+ self['DNSConfig'] = dns_config
+ if privileges is not None:
+ self['Privileges'] = privileges
+ if healthcheck is not None:
+ self['Healthcheck'] = healthcheck
+
if tty is not None:
self['TTY'] = tty
+ if open_stdin is not None:
+ self['OpenStdin'] = open_stdin
+ if read_only is not None:
+ self['ReadOnly'] = read_only
+
+ if isolation is not None:
+ self['Isolation'] = isolation
class Mount(dict):
@@ -140,9 +194,11 @@ class Mount(dict):
target (string): Container path.
source (string): Mount source (e.g. a volume name or a host path).
- type (string): The mount type (``bind`` or ``volume``).
- Default: ``volume``.
+ type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
+ ``npipe``). Default: ``volume``.
read_only (bool): Whether the mount should be read-only.
+ consistency (string): The consistency requirement for the mount. One of
+ ``default``, ``consistent``, ``cached``, ``delegated``.
propagation (string): A propagation mode with the value ``[r]private``,
``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
no_copy (bool): False if the volume should be populated with the data
@@ -152,30 +208,36 @@ class Mount(dict):
for the ``volume`` type.
driver_config (DriverConfig): Volume driver configuration. Only valid
for the ``volume`` type.
+ tmpfs_size (int or string): The size for the tmpfs mount in bytes.
+ tmpfs_mode (int): The permission mode for the tmpfs mount.
"""
def __init__(self, target, source, type='volume', read_only=False,
- propagation=None, no_copy=False, labels=None,
- driver_config=None):
+ consistency=None, propagation=None, no_copy=False,
+ labels=None, driver_config=None, tmpfs_size=None,
+ tmpfs_mode=None):
self['Target'] = target
self['Source'] = source
- if type not in ('bind', 'volume'):
+ if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
- 'Only acceptable mount types are `bind` and `volume`.'
+ 'Unsupported mount type: "{}"'.format(type)
)
self['Type'] = type
self['ReadOnly'] = read_only
+ if consistency:
+ self['Consistency'] = consistency
+
if type == 'bind':
if propagation is not None:
self['BindOptions'] = {
'Propagation': propagation
}
- if any([labels, driver_config, no_copy]):
+ if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode]):
raise errors.InvalidArgument(
- 'Mount type is binding but volume options have been '
- 'provided.'
+ 'Incompatible options have been provided for the bind '
+ 'type mount.'
)
- else:
+ elif type == 'volume':
volume_opts = {}
if no_copy:
volume_opts['NoCopy'] = True
@@ -185,10 +247,27 @@ class Mount(dict):
volume_opts['DriverConfig'] = driver_config
if volume_opts:
self['VolumeOptions'] = volume_opts
- if propagation:
+ if any([propagation, tmpfs_size, tmpfs_mode]):
raise errors.InvalidArgument(
- 'Mount type is volume but `propagation` argument has been '
- 'provided.'
+ 'Incompatible options have been provided for the volume '
+ 'type mount.'
+ )
+ elif type == 'tmpfs':
+ tmpfs_opts = {}
+ if tmpfs_mode:
+ if not isinstance(tmpfs_mode, six.integer_types):
+ raise errors.InvalidArgument(
+ 'tmpfs_mode must be an integer'
+ )
+ tmpfs_opts['Mode'] = tmpfs_mode
+ if tmpfs_size:
+ tmpfs_opts['SizeBytes'] = parse_bytes(tmpfs_size)
+ if tmpfs_opts:
+ self['TmpfsOptions'] = tmpfs_opts
+ if any([propagation, labels, driver_config, no_copy]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the tmpfs '
+ 'type mount.'
)
@classmethod
@@ -227,9 +306,13 @@ class Resources(dict):
mem_limit (int): Memory limit in Bytes.
cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
mem_reservation (int): Memory reservation in Bytes.
+ generic_resources (dict or :py:class:`list`): Node level generic
+ resources, for example a GPU, using the following format:
+ ``{ resource_name: resource_value }``. Alternatively, a list
+ of resource specifications as defined by the Engine API.
"""
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
- mem_reservation=None):
+ mem_reservation=None, generic_resources=None):
limits = {}
reservation = {}
if cpu_limit is not None:
@@ -240,13 +323,42 @@ class Resources(dict):
reservation['NanoCPUs'] = cpu_reservation
if mem_reservation is not None:
reservation['MemoryBytes'] = mem_reservation
-
+ if generic_resources is not None:
+ reservation['GenericResources'] = (
+ _convert_generic_resources_dict(generic_resources)
+ )
if limits:
self['Limits'] = limits
if reservation:
self['Reservations'] = reservation
+def _convert_generic_resources_dict(generic_resources):
+ if isinstance(generic_resources, list):
+ return generic_resources
+ if not isinstance(generic_resources, dict):
+ raise errors.InvalidArgument(
+ 'generic_resources must be a dict or a list'
+ ' (found {})'.format(type(generic_resources))
+ )
+ resources = []
+ for kind, value in six.iteritems(generic_resources):
+ resource_type = None
+ if isinstance(value, int):
+ resource_type = 'DiscreteResourceSpec'
+ elif isinstance(value, str):
+ resource_type = 'NamedResourceSpec'
+ else:
+ raise errors.InvalidArgument(
+ 'Unsupported generic resource reservation '
+ 'type: {}'.format({kind: value})
+ )
+ resources.append({
+ resource_type: {'Kind': kind, 'Value': value}
+ })
+ return resources
+
+
class UpdateConfig(dict):
"""
@@ -265,9 +377,11 @@ class UpdateConfig(dict):
max_failure_ratio (float): The fraction of tasks that may fail during
an update before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0
+ order (string): Specifies the order of operations when rolling out an
+ updated task. Either ``start-first`` or ``stop-first`` are accepted.
"""
def __init__(self, parallelism=0, delay=None, failure_action='continue',
- monitor=None, max_failure_ratio=None):
+ monitor=None, max_failure_ratio=None, order=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
@@ -291,6 +405,13 @@ class UpdateConfig(dict):
)
self['MaxFailureRatio'] = max_failure_ratio
+ if order is not None:
+ if order not in ('start-first', 'stop-first'):
+ raise errors.InvalidArgument(
+ 'order must be either `start-first` or `stop-first`'
+ )
+ self['Order'] = order
+
class RestartConditionTypesEnum(object):
_values = (
@@ -336,8 +457,9 @@ class DriverConfig(dict):
"""
Indicates which driver to use, as well as its configuration. Can be used
as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
- and for the `driver_config` in a volume
- :py:class:`~docker.types.Mount`.
+ for the `driver_config` in a volume :py:class:`~docker.types.Mount`, or
+ as the driver object in
+ :py:meth:`create_secret`.
Args:
@@ -360,9 +482,10 @@ class EndpointSpec(dict):
balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
``'vip'`` if not provided.
ports (dict): Exposed ports that this service is accessible on from the
- outside, in the form of ``{ target_port: published_port }`` or
- ``{ target_port: (published_port, protocol) }``. Ports can only be
- provided if the ``vip`` resolution mode is used.
+ outside, in the form of ``{ published_port: target_port }`` or
+ ``{ published_port: <port_config_tuple> }``. Port config tuple format
+ is ``(target_port [, protocol [, publish_mode]])``.
+ Ports can only be provided if the ``vip`` resolution mode is used.
"""
def __init__(self, mode=None, ports=None):
if ports:
@@ -388,8 +511,15 @@ def convert_service_ports(ports):
if isinstance(v, tuple):
port_spec['TargetPort'] = v[0]
- if len(v) == 2:
+ if len(v) >= 2 and v[1] is not None:
port_spec['Protocol'] = v[1]
+ if len(v) == 3:
+ port_spec['PublishMode'] = v[2]
+ if len(v) > 3:
+ raise ValueError(
+ 'Service port configuration can have at most 3 elements: '
+ '(target_port, protocol, mode)'
+ )
else:
port_spec['TargetPort'] = v
@@ -460,17 +590,45 @@ class SecretReference(dict):
}
+class ConfigReference(dict):
+ """
+ Config reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a config is made accessible inside the service's
+ containers.
+
+ Args:
+ config_id (string): Config's ID
+ config_name (string): Config's name as defined at its creation.
+ filename (string): Name of the file containing the config. Defaults
+ to the config's name if not specified.
+ uid (string): UID of the config file's owner. Default: 0
+ gid (string): GID of the config file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('config_id')
+ def __init__(self, config_id, config_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['ConfigName'] = config_name
+ self['ConfigID'] = config_id
+ self['File'] = {
+ 'Name': filename or config_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
class Placement(dict):
"""
Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args:
- constraints (list): A list of constraints
- preferences (list): Preferences provide a way to make the
- scheduler aware of factors such as topology. They are provided
- in order from highest to lowest precedence.
- platforms (list): A list of platforms expressed as ``(arch, os)``
- tuples
+ constraints (:py:class:`list`): A list of constraints
+ preferences (:py:class:`list`): Preferences provide a way to make
+ the scheduler aware of factors such as topology. They are
+ provided in order from highest to lowest precedence.
+ platforms (:py:class:`list`): A list of platforms expressed as
+ ``(arch, os)`` tuples
"""
def __init__(self, constraints=None, preferences=None, platforms=None):
if constraints is not None:
@@ -483,3 +641,75 @@ class Placement(dict):
self['Platforms'].append({
'Architecture': plat[0], 'OS': plat[1]
})
+
+
+class DNSConfig(dict):
+ """
+ Specification for DNS related configurations in resolver configuration
+ file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ nameservers (:py:class:`list`): The IP addresses of the name
+ servers.
+ search (:py:class:`list`): A search list for host-name lookup.
+ options (:py:class:`list`): A list of internal resolver variables
+ to be modified (e.g., ``debug``, ``ndots:3``, etc.).
+ """
+ def __init__(self, nameservers=None, search=None, options=None):
+ self['Nameservers'] = nameservers
+ self['Search'] = search
+ self['Options'] = options
+
+
+class Privileges(dict):
+ """
+ Security options for a service's containers.
+ Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ credentialspec_file (str): Load credential spec from this file.
+ The file is read by the daemon, and must be present in the
+ CredentialSpecs subdirectory in the docker data directory,
+ which defaults to ``C:\ProgramData\Docker\`` on Windows.
+ Can not be combined with credentialspec_registry.
+
+ credentialspec_registry (str): Load credential spec from this value
+ in the Windows registry. The specified registry value must be
+ located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion
+ \Virtualization\Containers\CredentialSpecs``.
+ Can not be combined with credentialspec_file.
+
+ selinux_disable (boolean): Disable SELinux
+ selinux_user (string): SELinux user label
+ selinux_role (string): SELinux role label
+ selinux_type (string): SELinux type label
+ selinux_level (string): SELinux level label
+ """
+ def __init__(self, credentialspec_file=None, credentialspec_registry=None,
+ selinux_disable=None, selinux_user=None, selinux_role=None,
+ selinux_type=None, selinux_level=None):
+ credential_spec = {}
+ if credentialspec_registry is not None:
+ credential_spec['Registry'] = credentialspec_registry
+ if credentialspec_file is not None:
+ credential_spec['File'] = credentialspec_file
+
+ if len(credential_spec) > 1:
+ raise errors.InvalidArgument(
+ 'credentialspec_file and credentialspec_registry are mutually'
+ ' exclusive'
+ )
+
+ selinux_context = {
+ 'Disable': selinux_disable,
+ 'User': selinux_user,
+ 'Role': selinux_role,
+ 'Type': selinux_type,
+ 'Level': selinux_level,
+ }
+
+ if len(credential_spec) > 0:
+ self['CredentialSpec'] = credential_spec
+
+ if len(selinux_context) > 0:
+ self['SELinuxContext'] = selinux_context
diff --git a/docker/types/swarm.py b/docker/types/swarm.py
index 49beaa1..9687a82 100644
--- a/docker/types/swarm.py
+++ b/docker/types/swarm.py
@@ -1,9 +1,21 @@
+from ..errors import InvalidVersion
+from ..utils import version_lt
+
+
class SwarmSpec(dict):
- def __init__(self, task_history_retention_limit=None,
+ """
+ Describe a Swarm's configuration and options. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
+ to instantiate.
+ """
+ def __init__(self, version, task_history_retention_limit=None,
snapshot_interval=None, keep_old_snapshots=None,
log_entries_for_slow_followers=None, heartbeat_tick=None,
election_tick=None, dispatcher_heartbeat_period=None,
- node_cert_expiry=None, external_ca=None, name=None):
+ node_cert_expiry=None, external_cas=None, name=None,
+ labels=None, signing_ca_cert=None, signing_ca_key=None,
+ ca_force_rotate=None, autolock_managers=None,
+ log_driver=None):
if task_history_retention_limit is not None:
self['Orchestration'] = {
'TaskHistoryRetentionLimit': task_history_retention_limit
@@ -26,18 +38,82 @@ class SwarmSpec(dict):
'HeartbeatPeriod': dispatcher_heartbeat_period
}
- if node_cert_expiry or external_ca:
- self['CAConfig'] = {
- 'NodeCertExpiry': node_cert_expiry,
- 'ExternalCA': external_ca
- }
+ ca_config = {}
+ if node_cert_expiry is not None:
+ ca_config['NodeCertExpiry'] = node_cert_expiry
+ if external_cas:
+ if version_lt(version, '1.25'):
+ if len(external_cas) > 1:
+ raise InvalidVersion(
+ 'Support for multiple external CAs is not available '
+ 'for API version < 1.25'
+ )
+ ca_config['ExternalCA'] = external_cas[0]
+ else:
+ ca_config['ExternalCAs'] = external_cas
+ if signing_ca_key:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_key is not supported in API version < 1.30'
+ )
+ ca_config['SigningCAKey'] = signing_ca_key
+ if signing_ca_cert:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_cert is not supported in API version < 1.30'
+ )
+ ca_config['SigningCACert'] = signing_ca_cert
+ if ca_force_rotate is not None:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'force_rotate is not supported in API version < 1.30'
+ )
+ ca_config['ForceRotate'] = ca_force_rotate
+ if ca_config:
+ self['CAConfig'] = ca_config
+
+ if autolock_managers is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'autolock_managers is not supported in API version < 1.25'
+ )
+
+ self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers}
+
+ if log_driver is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'log_driver is not supported in API version < 1.25'
+ )
+
+ self['TaskDefaults'] = {'LogDriver': log_driver}
if name is not None:
self['Name'] = name
+ if labels is not None:
+ self['Labels'] = labels
class SwarmExternalCA(dict):
- def __init__(self, url, protocol=None, options=None):
+ """
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+
+ Args:
+ url (string): URL where certificate signing requests should be
+ sent.
+ protocol (string): Protocol for communication with the external CA.
+ options (dict): An object with key/value pairs that are interpreted
+ as protocol-specific options for the external CA driver.
+ ca_cert (string): The root CA certificate (in PEM format) this
+ external CA uses to issue TLS certificates (assumed to be to
+ the current swarm root CA certificate if not provided).
+
+
+
+ """
+ def __init__(self, url, protocol=None, options=None, ca_cert=None):
self['URL'] = url
self['Protocol'] = protocol
self['Options'] = options
+ self['CACert'] = ca_cert
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index b758cbd..81c8186 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,13 +1,13 @@
# flake8: noqa
-from .build import tar, exclude_paths
+from .build import create_archive, exclude_paths, mkbuildcontext, tar
from .decorators import check_resource, minimum_version, update_headers
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
- mkbuildcontext, parse_repository_tag, parse_host,
+ parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, datetime_to_timestamp,
- create_host_config, parse_bytes, ping_registry, parse_env_file, version_lt,
+ create_host_config, parse_bytes, parse_env_file, version_lt,
version_gte, decode_json_header, split_command, create_ipam_config,
create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
- format_environment, create_archive
+ format_environment, format_extra_hosts
)
diff --git a/docker/utils/build.py b/docker/utils/build.py
index d4223e7..b644c9f 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -1,17 +1,34 @@
+import io
import os
+import re
+import six
+import tarfile
+import tempfile
from ..constants import IS_WINDOWS_PLATFORM
-from .fnmatch import fnmatch
-from .utils import create_archive
+from fnmatch import fnmatch
+from itertools import chain
+
+
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
root = os.path.abspath(path)
exclude = exclude or []
-
+ dockerfile = dockerfile or (None, None)
+ extra_files = []
+ if dockerfile[1] is not None:
+ dockerignore_contents = '\n'.join(
+ (exclude or ['.dockerignore']) + [dockerfile[0]]
+ )
+ extra_files = [
+ ('.dockerignore', dockerignore_contents),
+ dockerfile,
+ ]
return create_archive(
- files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
- root=root, fileobj=fileobj, gzip=gzip
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
+ root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
)
@@ -23,121 +40,180 @@ def exclude_paths(root, patterns, dockerfile=None):
All paths returned are relative to the root.
"""
+
if dockerfile is None:
dockerfile = 'Dockerfile'
- patterns = [p.lstrip('/') for p in patterns]
- exceptions = [p for p in patterns if p.startswith('!')]
-
- include_patterns = [p[1:] for p in exceptions]
- include_patterns += [dockerfile, '.dockerignore']
-
- exclude_patterns = list(set(patterns) - set(exceptions))
-
- paths = get_paths(root, exclude_patterns, include_patterns,
- has_exceptions=len(exceptions) > 0)
-
- return set(paths).union(
- # If the Dockerfile is in a subdirectory that is excluded, get_paths
- # will not descend into it and the file will be skipped. This ensures
- # it doesn't happen.
- set([dockerfile.replace('/', os.path.sep)])
- if os.path.exists(os.path.join(root, dockerfile)) else set()
- )
-
-
-def should_include(path, exclude_patterns, include_patterns):
+ def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+ def normalize(p):
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ patterns = (
+ (True, normalize(p[1:]))
+ if p.startswith('!') else
+ (False, normalize(p))
+ for p in patterns)
+ patterns = list(reversed(list(chain(
+ # Exclude empty patterns such as "." or the empty string.
+ filter(lambda p: p[1], patterns),
+ # Always include the Dockerfile and .dockerignore
+ [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
+ return set(walk(root, patterns))
+
+
+def walk(root, patterns, default=True):
"""
- Given a path, a list of exclude patterns, and a list of inclusion patterns:
-
- 1. Returns True if the path doesn't match any exclusion pattern
- 2. Returns False if the path matches an exclusion pattern and doesn't match
- an inclusion pattern
- 3. Returns true if the path matches an exclusion pattern and matches an
- inclusion pattern
+ A collection of files lying below root that should be included according to
+ patterns.
"""
- for pattern in exclude_patterns:
- if match_path(path, pattern):
- for pattern in include_patterns:
- if match_path(path, pattern):
- return True
- return False
- return True
-
-def should_check_directory(directory_path, exclude_patterns, include_patterns):
- """
- Given a directory path, a list of exclude patterns, and a list of inclusion
- patterns:
-
- 1. Returns True if the directory path should be included according to
- should_include.
- 2. Returns True if the directory path is the prefix for an inclusion
- pattern
- 3. Returns False otherwise
- """
-
- # To account for exception rules, check directories if their path is a
- # a prefix to an inclusion pattern. This logic conforms with the current
- # docker logic (2016-10-27):
- # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
-
- def normalize_path(path):
- return path.replace(os.path.sep, '/')
-
- path_with_slash = normalize_path(directory_path) + '/'
- possible_child_patterns = [
- pattern for pattern in map(normalize_path, include_patterns)
- if (pattern + '/').startswith(path_with_slash)
- ]
- directory_included = should_include(
- directory_path, exclude_patterns, include_patterns
- )
- return directory_included or len(possible_child_patterns) > 0
-
-
-def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
- paths = []
-
- for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
- parent = os.path.relpath(parent, root)
- if parent == '.':
- parent = ''
-
- # Remove excluded patterns from the list of directories to traverse
- # by mutating the dirs we're iterating over.
- # This looks strange, but is considered the correct way to skip
- # traversal. See https://docs.python.org/2/library/os.html#os.walk
- dirs[:] = [
- d for d in dirs if should_check_directory(
- os.path.join(parent, d), exclude_patterns, include_patterns
+ def match(p):
+ if p[1][0] == '**':
+ rec = (p[0], p[1][1:])
+ return [p] + (match(rec) if rec[1] else [rec])
+ elif fnmatch(f, p[1][0]):
+ return [(p[0], p[1][1:])]
+ else:
+ return []
+
+ for f in os.listdir(root):
+ cur = os.path.join(root, f)
+ # The patterns if recursing in that directory.
+ sub = list(chain(*(match(p) for p in patterns)))
+ # Whether this file is explicitly included / excluded.
+ hit = next((p[0] for p in sub if not p[1]), None)
+ # Whether this file is implicitly included / excluded.
+ matched = default if hit is None else hit
+ sub = list(filter(lambda p: p[1], sub))
+ if os.path.isdir(cur) and not os.path.islink(cur):
+ # Entirely skip directories if there is no chance any subfile will
+ # be included.
+ if all(not p[0] for p in sub) and not matched:
+ continue
+ # I think this would greatly speed up dockerignore handling by not
+ # recursing into directories we are sure would be entirely
+ # included, and only yielding the directory itself, which will be
+ # recursively archived anyway. However the current unit test expect
+ # the full list of subfiles and I'm not 100% sure it would make no
+ # difference yet.
+ # if all(p[0] for p in sub) and matched:
+ # yield f
+ # continue
+ children = False
+ for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
+ yield r
+ children = True
+ # The current unit tests expect directories only under those
+ # conditions. It might be simplifiable though.
+ if (not sub or not children) and hit or hit is None and default:
+ yield f
+ elif matched:
+ yield f
+
+
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
)
- ]
-
- for path in dirs:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- for path in files:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- return paths
-
-
-def match_path(path, pattern):
- pattern = pattern.rstrip('/' + os.path.sep)
- if pattern:
- pattern = os.path.relpath(pattern)
-
- pattern_components = pattern.split(os.path.sep)
- if len(pattern_components) == 1 and IS_WINDOWS_PLATFORM:
- pattern_components = pattern.split('/')
- if '**' not in pattern:
- path_components = path.split(os.path.sep)[:len(pattern_components)]
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False,
+ extra_files=None):
+ extra_files = extra_files or []
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ extra_names = set(e[0] for e in extra_files)
+ for path in files:
+ if path in extra_names:
+ # Extra files override context files with the same name
+ continue
+ full_path = os.path.join(root, path)
+
+ i = t.gettarinfo(full_path, arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
+
+ # Workaround https://bugs.python.org/issue32713
+ if i.mtime < 0 or i.mtime > 8**11 - 1:
+ i.mtime = int(i.mtime)
+
+ if IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+
+ if i.isfile():
+ try:
+ with open(full_path, 'rb') as f:
+ t.addfile(i, f)
+ except IOError:
+ raise IOError(
+ 'Can not read file in context: {}'.format(full_path)
+ )
+ else:
+ # Directories, FIFOs, symlinks... don't need to be read.
+ t.addfile(i, None)
+
+ for name, contents in extra_files:
+ info = tarfile.TarInfo(name)
+ info.size = len(contents)
+ t.addfile(info, io.BytesIO(contents.encode('utf-8')))
+
+ t.close()
+ fileobj.seek(0)
+ return fileobj
+
+
+def mkbuildcontext(dockerfile):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+ if isinstance(dockerfile, io.StringIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ if six.PY3:
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
+ else:
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ elif isinstance(dockerfile, io.BytesIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
else:
- path_components = path.split(os.path.sep)
- return fnmatch('/'.join(path_components), '/'.join(pattern_components))
+ dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+ t.addfile(dfinfo, dockerfile)
+ t.close()
+ f.seek(0)
+ return f
diff --git a/docker/utils/config.py b/docker/utils/config.py
new file mode 100644
index 0000000..82a0e2a
--- /dev/null
+++ b/docker/utils/config.py
@@ -0,0 +1,66 @@
+import json
+import logging
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+
+DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
+LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+log = logging.getLogger(__name__)
+
+
+def find_config_file(config_path=None):
+ paths = list(filter(None, [
+ config_path, # 1
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
+
+ log.debug("Trying paths: {0}".format(repr(paths)))
+
+ for path in paths:
+ if os.path.exists(path):
+ log.debug("Found file at path: {0}".format(path))
+ return path
+
+ log.debug("No config file found")
+
+ return None
+
+
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+ """
+ Get the user's home directory, using the same logic as the Docker Engine
+ client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
+def load_general_config(config_path=None):
+ config_file = find_config_file(config_path)
+
+ if not config_file:
+ return {}
+
+ try:
+ with open(config_file) as f:
+ return json.load(f)
+ except (IOError, ValueError) as e:
+ # In the case of a legacy `.dockercfg` file, we won't
+ # be able to load any JSON data.
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 5e195c0..c975d4b 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -38,10 +38,10 @@ def minimum_version(version):
def update_headers(f):
def inner(self, *args, **kwargs):
- if 'HttpHeaders' in self._auth_configs:
+ if 'HttpHeaders' in self._general_configs:
if not kwargs.get('headers'):
- kwargs['headers'] = self._auth_configs['HttpHeaders']
+ kwargs['headers'] = self._general_configs['HttpHeaders']
else:
- kwargs['headers'].update(self._auth_configs['HttpHeaders'])
+ kwargs['headers'].update(self._general_configs['HttpHeaders'])
return f(self, *args, **kwargs)
return inner
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 54392d2..0945f0a 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -22,8 +22,7 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
- # wait for data to become available
- if not isinstance(socket, NpipeSocket):
+ if six.PY3 and not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
@@ -59,7 +58,7 @@ def next_frame_size(socket):
try:
data = read_exactly(socket, 8)
except SocketError:
- return 0
+ return -1
_, actual = struct.unpack('>BxxxL', data)
return actual
@@ -71,7 +70,7 @@ def frames_iter(socket):
"""
while True:
n = next_frame_size(socket)
- if n == 0:
+ if n < 0:
break
while n > 0:
result = read(socket, n)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index d9a6d7c..fe3b9a5 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,19 +1,13 @@
import base64
-import io
import os
import os.path
import json
import shlex
-import tarfile
-import tempfile
-import warnings
from distutils.version import StrictVersion
from datetime import datetime
-import requests
import six
-from .. import constants
from .. import errors
from .. import tls
@@ -48,29 +42,6 @@ def create_ipam_config(*args, **kwargs):
)
-def mkbuildcontext(dockerfile):
- f = tempfile.NamedTemporaryFile()
- t = tarfile.open(mode='w', fileobj=f)
- if isinstance(dockerfile, io.StringIO):
- dfinfo = tarfile.TarInfo('Dockerfile')
- if six.PY3:
- raise TypeError('Please use io.BytesIO to create in-memory '
- 'Dockerfiles with Python 3')
- else:
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
- elif isinstance(dockerfile, io.BytesIO):
- dfinfo = tarfile.TarInfo('Dockerfile')
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
- else:
- dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
- t.addfile(dfinfo, dockerfile)
- t.close()
- f.seek(0)
- return f
-
-
def decode_json_header(header):
data = base64.b64decode(header)
if six.PY3:
@@ -78,48 +49,6 @@ def decode_json_header(header):
return json.loads(data)
-def build_file_list(root):
- files = []
- for dirname, dirnames, fnames in os.walk(root):
- for filename in fnames + dirnames:
- longpath = os.path.join(dirname, filename)
- files.append(
- longpath.replace(root, '', 1).lstrip('/')
- )
-
- return files
-
-
-def create_archive(root, files=None, fileobj=None, gzip=False):
- if not fileobj:
- fileobj = tempfile.NamedTemporaryFile()
- t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
- if files is None:
- files = build_file_list(root)
- for path in files:
- i = t.gettarinfo(os.path.join(root, path), arcname=path)
- if i is None:
- # This happens when we encounter a socket file. We can safely
- # ignore it and proceed.
- continue
-
- if constants.IS_WINDOWS_PLATFORM:
- # Windows doesn't keep track of the execute bit, so we make files
- # and directories executable by default.
- i.mode = i.mode & 0o755 | 0o111
-
- try:
- # We open the file object in binary mode for Windows support.
- with open(os.path.join(root, path), 'rb') as f:
- t.addfile(i, f)
- except IOError:
- # When we encounter a directory the file object is set to None.
- t.addfile(i, None)
- t.close()
- fileobj.seek(0)
- return fileobj
-
-
def compare_version(v1, v2):
"""Compare docker versions
@@ -150,29 +79,6 @@ def version_gte(v1, v2):
return not version_lt(v1, v2)
-def ping_registry(url):
- warnings.warn(
- 'The `ping_registry` method is deprecated and will be removed.',
- DeprecationWarning
- )
-
- return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
-
-
-def ping(url, valid_4xx_statuses=None):
- try:
- res = requests.get(url, timeout=3)
- except Exception:
- return False
- else:
- # We don't send yet auth headers
- # and a v2 registry will respond with status 401
- return (
- res.status_code < 400 or
- (valid_4xx_statuses and res.status_code in valid_4xx_statuses)
- )
-
-
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
@@ -564,6 +470,18 @@ def format_environment(environment):
return [format_env(*var) for var in six.iteritems(environment)]
+def format_extra_hosts(extra_hosts, task=False):
+ # Use format dictated by Swarm API if container is part of a task
+ if task:
+ return [
+ '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ ]
+
+ return [
+ '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ ]
+
+
def create_host_config(self, *args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_host_config has been removed. Please use a '
diff --git a/docker/version.py b/docker/version.py
index 273270d..28dd1ea 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "2.5.1"
+version = "3.2.1"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/requirements.txt b/requirements.txt
index f3c61e7..2b281ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ asn1crypto==0.22.0
backports.ssl-match-hostname==3.5.0.1
cffi==1.10.0
cryptography==1.9
-docker-pycreds==0.2.1
+docker-pycreds==0.2.2
enum34==1.1.6
idna==2.5
ipaddress==1.0.18
@@ -11,6 +11,8 @@ packaging==16.8
pycparser==2.17
pyOpenSSL==17.0.0
pyparsing==2.2.0
+pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
+pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
requests==2.14.2
six==1.10.0
websocket-client==0.40.0
diff --git a/setup.py b/setup.py
index 4a33c8d..271d94f 100644
--- a/setup.py
+++ b/setup.py
@@ -20,15 +20,12 @@ ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2, != 2.11.0, != 2.12.2, != 2.18.0',
+ 'requests >= 2.14.2, != 2.18.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
- 'docker-pycreds >= 0.2.1'
+ 'docker-pycreds >= 0.2.2'
]
-if sys.platform == 'win32':
- requirements.append('pypiwin32 >= 219')
-
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
# While not imported explicitly, the ipaddress module is required for
@@ -36,6 +33,12 @@ extras_require = {
# ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
+ # win32 APIs if on Windows (required for npipe support)
+ # Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
+ # on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
+ ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
+ ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==220',
+
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
@@ -87,6 +90,7 @@ setup(
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
diff --git a/test-requirements.txt b/test-requirements.txt
index 460db10..09680b6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,5 +1,6 @@
+coverage==3.7.1
+flake8==3.4.1
mock==1.0.1
pytest==2.9.1
-coverage==3.7.1
pytest-cov==2.1.0
-flake8==2.4.1
+pytest-timeout==1.2.1
diff --git a/tests/helpers.py b/tests/helpers.py
index 124ae2d..b6b493b 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -5,6 +5,9 @@ import random
import tarfile
import tempfile
import time
+import re
+import six
+import socket
import docker
import pytest
@@ -102,3 +105,29 @@ def force_leave_swarm(client):
def swarm_listen_addr():
return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
+
+
+def assert_cat_socket_detached_with_keys(sock, inputs):
+ if six.PY3 and hasattr(sock, '_sock'):
+ sock = sock._sock
+
+ for i in inputs:
+ sock.sendall(i)
+ time.sleep(0.5)
+
+ # If we're using a Unix socket, the sock.send call will fail with a
+ # BrokenPipeError ; INET sockets will just stop receiving / sending data
+ # but will not raise an error
+ if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
+ with pytest.raises(socket.error):
+ sock.sendall(b'make sure the socket is closed\n')
+ else:
+ sock.sendall(b"make sure the socket is closed\n")
+ assert sock.recv(32) == b''
+
+
+def ctrl_with(char):
+ if re.match('[a-z]', char):
+ return chr(ord(char) - ord('a') + 1).encode('ascii')
+ else:
+ raise(Exception('char must be [a-z]'))
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
index d0aa5c2..8910eb7 100644
--- a/tests/integration/api_build_test.py
+++ b/tests/integration/api_build_test.py
@@ -8,8 +8,8 @@ from docker import errors
import pytest
import six
-from .base import BaseAPIIntegrationTest
-from ..helpers import requires_api_version, requires_experimental
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import random_name, requires_api_version, requires_experimental
class BuildTest(BaseAPIIntegrationTest):
@@ -21,7 +21,7 @@ class BuildTest(BaseAPIIntegrationTest):
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
- stream = self.client.build(fileobj=script, stream=True, decode=True)
+ stream = self.client.build(fileobj=script, decode=True)
logs = []
for chunk in stream:
logs.append(chunk)
@@ -37,15 +37,14 @@ class BuildTest(BaseAPIIntegrationTest):
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
- stream = self.client.build(fileobj=script, stream=True)
+ stream = self.client.build(fileobj=script)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
logs += chunk
- self.assertNotEqual(logs, '')
+ assert logs != ''
- @requires_api_version('1.8')
def test_build_with_dockerignore(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
@@ -62,12 +61,16 @@ class BuildTest(BaseAPIIntegrationTest):
'Dockerfile',
'.dockerignore',
'!ignored/subdir/excepted-file',
- '', # empty line
+ '', # empty line,
+ '#*', # comment line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
+ with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
+ f.write('this file should not be ignored')
+
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
@@ -92,13 +95,12 @@ class BuildTest(BaseAPIIntegrationTest):
if six.PY3:
logs = logs.decode('utf-8')
- self.assertEqual(
- sorted(list(filter(None, logs.split('\n')))),
- sorted(['/test/ignored/subdir/excepted-file',
- '/test/not-ignored']),
- )
+ assert sorted(list(filter(None, logs.split('\n')))) == sorted([
+ '/test/#file.txt',
+ '/test/ignored/subdir/excepted-file',
+ '/test/not-ignored'
+ ])
- @requires_api_version('1.21')
def test_build_with_buildargs(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
@@ -114,7 +116,7 @@ class BuildTest(BaseAPIIntegrationTest):
pass
info = self.client.inspect_image('buildargs')
- self.assertEqual(info['Config']['User'], 'OK')
+ assert info['Config']['User'] == 'OK'
@requires_api_version('1.22')
def test_build_shmsize(self):
@@ -136,6 +138,21 @@ class BuildTest(BaseAPIIntegrationTest):
# There is currently no way to get the shmsize
# that was used to build the image
+ @requires_api_version('1.24')
+ def test_build_isolation(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Deaf To All But The Song\''
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='isolation',
+ isolation='default'
+ )
+
+ for chunk in stream:
+ pass
+
@requires_api_version('1.23')
def test_build_labels(self):
script = io.BytesIO('\n'.join([
@@ -152,7 +169,7 @@ class BuildTest(BaseAPIIntegrationTest):
pass
info = self.client.inspect_image('labels')
- self.assertEqual(info['Config']['Labels'], labels)
+ assert info['Config']['Labels'] == labels
@requires_api_version('1.25')
def test_build_with_cache_from(self):
@@ -210,25 +227,35 @@ class BuildTest(BaseAPIIntegrationTest):
pass
info = self.client.inspect_image('build1')
- self.assertEqual(info['Config']['OnBuild'], [])
+ assert not info['Config']['OnBuild']
@requires_api_version('1.25')
def test_build_with_network_mode(self):
+ # Set up pingable endpoint on custom network
+ network = self.client.create_network(random_name())['Id']
+ self.tmp_networks.append(network)
+ container = self.client.create_container(BUSYBOX, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.connect_container_to_network(
+ container, network, aliases=['pingtarget.docker']
+ )
+
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'RUN wget http://google.com'
+ 'RUN ping -c1 pingtarget.docker'
]).encode('ascii'))
stream = self.client.build(
- fileobj=script, network_mode='bridge',
- tag='dockerpytest_bridgebuild'
+ fileobj=script, network_mode=network,
+ tag='dockerpytest_customnetbuild'
)
- self.tmp_imgs.append('dockerpytest_bridgebuild')
+ self.tmp_imgs.append('dockerpytest_customnetbuild')
for chunk in stream:
pass
- assert self.client.inspect_image('dockerpytest_bridgebuild')
+ assert self.client.inspect_image('dockerpytest_customnetbuild')
script.seek(0)
stream = self.client.build(
@@ -244,6 +271,38 @@ class BuildTest(BaseAPIIntegrationTest):
with pytest.raises(errors.NotFound):
self.client.inspect_image('dockerpytest_nonebuild')
+ @requires_api_version('1.27')
+ def test_build_with_extra_hosts(self):
+ img_name = 'dockerpytest_extrahost_build'
+ self.tmp_imgs.append(img_name)
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 hello.world.test',
+ 'RUN ping -c1 extrahost.local.test',
+ 'RUN cp /etc/hosts /hosts-file'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag=img_name,
+ extra_hosts={
+ 'extrahost.local.test': '127.0.0.1',
+ 'hello.world.test': '127.0.0.1',
+ }, decode=True
+ )
+ for chunk in stream:
+ if 'errorDetail' in chunk:
+ pytest.fail(chunk)
+
+ assert self.client.inspect_image(img_name)
+ ctnr = self.run_container(img_name, 'cat /hosts-file')
+ self.tmp_containers.append(ctnr)
+ logs = self.client.logs(ctnr)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert '127.0.0.1\textrahost.local.test' in logs
+ assert '127.0.0.1\thello.world.test' in logs
+
@requires_experimental(until=None)
@requires_api_version('1.25')
def test_build_squash(self):
@@ -267,8 +326,8 @@ class BuildTest(BaseAPIIntegrationTest):
non_squashed = build_squashed(False)
squashed = build_squashed(True)
- self.assertEqual(len(non_squashed['RootFS']['Layers']), 4)
- self.assertEqual(len(squashed['RootFS']['Layers']), 2)
+ assert len(non_squashed['RootFS']['Layers']) == 4
+ assert len(squashed['RootFS']['Layers']) == 2
def test_build_stderr_data(self):
control_chars = ['\x1b[91m', '\x1b[0m']
@@ -279,7 +338,7 @@ class BuildTest(BaseAPIIntegrationTest):
]))
stream = self.client.build(
- fileobj=script, stream=True, decode=True, nocache=True
+ fileobj=script, decode=True, nocache=True
)
lines = []
for chunk in stream:
@@ -287,7 +346,7 @@ class BuildTest(BaseAPIIntegrationTest):
expected = '{0}{2}\n{1}'.format(
control_chars[0], control_chars[1], snippet
)
- self.assertTrue(any([line == expected for line in lines]))
+ assert any([line == expected for line in lines])
def test_build_gzip_encoding(self):
base_dir = tempfile.mkdtemp()
@@ -300,7 +359,7 @@ class BuildTest(BaseAPIIntegrationTest):
]))
stream = self.client.build(
- path=base_dir, stream=True, decode=True, nocache=True,
+ path=base_dir, decode=True, nocache=True,
gzip=True
)
@@ -310,6 +369,106 @@ class BuildTest(BaseAPIIntegrationTest):
assert 'Successfully built' in lines[-1]['stream']
+ def test_build_with_dockerfile_empty_lines(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('\n'.join([
+ ' ',
+ '',
+ '\t\t',
+ '\t ',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully built' in lines[-1]['stream']
+
def test_build_gzip_custom_encoding(self):
- with self.assertRaises(errors.DockerException):
+ with pytest.raises(errors.DockerException):
self.client.build(path='.', gzip=True, encoding='text/html')
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_build_invalid_platform(self):
+ script = io.BytesIO('FROM busybox\n'.encode('ascii'))
+
+ with pytest.raises(errors.APIError) as excinfo:
+ stream = self.client.build(fileobj=script, platform='foobar')
+ for _ in stream:
+ pass
+
+ assert excinfo.value.status_code == 400
+ assert 'invalid platform' in excinfo.exconly()
+
+ def test_build_out_of_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('.dockerignore\n')
+ df = tempfile.NamedTemporaryFile()
+ self.addCleanup(df.close)
+ df.write(('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ])).encode('utf-8'))
+ df.flush()
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=df.name, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 3
+ assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
+
+ def test_build_in_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ print(os.path.join(base_dir, 'custom.dockerfile'))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
index cc64158..05281f8 100644
--- a/tests/integration/api_client_test.py
+++ b/tests/integration/api_client_test.py
@@ -14,15 +14,14 @@ from .base import BaseAPIIntegrationTest
class InformationTest(BaseAPIIntegrationTest):
def test_version(self):
res = self.client.version()
- self.assertIn('GoVersion', res)
- self.assertIn('Version', res)
- self.assertEqual(len(res['Version'].split('.')), 3)
+ assert 'GoVersion' in res
+ assert 'Version' in res
def test_info(self):
res = self.client.info()
- self.assertIn('Containers', res)
- self.assertIn('Images', res)
- self.assertIn('Debug', res)
+ assert 'Containers' in res
+ assert 'Images' in res
+ assert 'Debug' in res
class LoadConfigTest(BaseAPIIntegrationTest):
@@ -36,12 +35,12 @@ class LoadConfigTest(BaseAPIIntegrationTest):
f.write('email = sakuya@scarlet.net')
f.close()
cfg = docker.auth.load_config(cfg_path)
- self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
+ assert cfg[docker.auth.INDEX_NAME] is not None
cfg = cfg[docker.auth.INDEX_NAME]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('Auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('Auth') is None
def test_load_json_config(self):
folder = tempfile.mkdtemp()
@@ -54,12 +53,12 @@ class LoadConfigTest(BaseAPIIntegrationTest):
docker.auth.INDEX_URL, auth_, email_))
f.close()
cfg = docker.auth.load_config(cfg_path)
- self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
+ assert cfg[docker.auth.INDEX_URL] is not None
cfg = cfg[docker.auth.INDEX_URL]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('Auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('Auth') is None
class AutoDetectVersionTest(unittest.TestCase):
@@ -67,9 +66,9 @@ class AutoDetectVersionTest(unittest.TestCase):
client = docker.APIClient(version='auto', **kwargs_from_env())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
- self.assertEqual(client_version, api_version)
+ assert client_version == api_version
api_version_2 = client.version()['ApiVersion']
- self.assertEqual(client_version, api_version_2)
+ assert client_version == api_version_2
client.close()
@@ -91,8 +90,8 @@ class ConnectionTimeoutTest(unittest.TestCase):
except:
pass
end = time.time()
- self.assertTrue(res is None)
- self.assertTrue(end - start < 2 * self.timeout)
+ assert res is None
+ assert end - start < 2 * self.timeout
class UnixconnTest(unittest.TestCase):
@@ -113,5 +112,6 @@ class UnixconnTest(unittest.TestCase):
client.close()
del client
- assert len(w) == 0, \
- "No warnings produced: {0}".format(w[0].message)
+ assert len(w) == 0, "No warnings produced: {0}".format(
+ w[0].message
+ )
diff --git a/tests/integration/api_config_test.py b/tests/integration/api_config_test.py
new file mode 100644
index 0000000..0ffd767
--- /dev/null
+++ b/tests/integration/api_config_test.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.30')
+class ConfigAPITest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def test_create_config(self):
+ config_id = self.client.create_config(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_config_unicode_data(self):
+ config_id = self.client.create_config(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_config(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == config_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_config(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+
+ assert self.client.remove_config(config_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_config(config_id)
+
+ def test_list_configs(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+
+ data = self.client.configs(filters={'name': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == config_id['ID']
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
index a972c1c..e212518 100644
--- a/tests/integration/api_container_test.py
+++ b/tests/integration/api_container_test.py
@@ -1,6 +1,9 @@
import os
+import re
import signal
import tempfile
+import threading
+from datetime import datetime
import docker
from docker.constants import IS_WINDOWS_PLATFORM
@@ -9,11 +12,14 @@ from docker.utils.socket import read_exactly
import pytest
+import requests
import six
from .base import BUSYBOX, BaseAPIIntegrationTest
from .. import helpers
-from ..helpers import requires_api_version
+from ..helpers import (
+ requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
+)
class ListContainersTest(BaseAPIIntegrationTest):
@@ -21,26 +27,26 @@ class ListContainersTest(BaseAPIIntegrationTest):
res0 = self.client.containers(all=True)
size = len(res0)
res1 = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res1)
+ assert 'Id' in res1
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
res2 = self.client.containers(all=True)
- self.assertEqual(size + 1, len(res2))
+ assert size + 1 == len(res2)
retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
- self.assertEqual(len(retrieved), 1)
+ assert len(retrieved) == 1
retrieved = retrieved[0]
- self.assertIn('Command', retrieved)
- self.assertEqual(retrieved['Command'], six.text_type('true'))
- self.assertIn('Image', retrieved)
- self.assertRegex(retrieved['Image'], r'busybox:.*')
- self.assertIn('Status', retrieved)
+ assert 'Command' in retrieved
+ assert retrieved['Command'] == six.text_type('true')
+ assert 'Image' in retrieved
+ assert re.search(r'busybox:.*', retrieved['Image'])
+ assert 'Status' in retrieved
class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
def test_create_with_host_pid_mode(self):
@@ -49,14 +55,14 @@ class CreateContainerTest(BaseAPIIntegrationTest):
pid_mode='host', network_mode='none'
)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
- self.assertIn('HostConfig', inspect)
+ assert 'HostConfig' in inspect
host_config = inspect['HostConfig']
- self.assertIn('PidMode', host_config)
- self.assertEqual(host_config['PidMode'], 'host')
+ assert 'PidMode' in host_config
+ assert host_config['PidMode'] == 'host'
def test_create_with_links(self):
res0 = self.client.create_container(
@@ -97,15 +103,15 @@ class CreateContainerTest(BaseAPIIntegrationTest):
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
- self.assertEqual(self.client.wait(container3_id), 0)
+ assert self.client.wait(container3_id)['StatusCode'] == 0
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
- self.assertIn('{0}_NAME='.format(link_env_prefix1), logs)
- self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs)
- self.assertIn('{0}_NAME='.format(link_env_prefix2), logs)
- self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs)
+ assert '{0}_NAME='.format(link_env_prefix1) in logs
+ assert '{0}_ENV_FOO=1'.format(link_env_prefix1) in logs
+ assert '{0}_NAME='.format(link_env_prefix2) in logs
+ assert '{0}_ENV_FOO=1'.format(link_env_prefix2) in logs
def test_create_with_restart_policy(self):
container = self.client.create_container(
@@ -118,12 +124,10 @@ class CreateContainerTest(BaseAPIIntegrationTest):
id = container['Id']
self.client.start(id)
self.client.wait(id)
- with self.assertRaises(docker.errors.APIError) as exc:
+ with pytest.raises(docker.errors.APIError) as exc:
self.client.remove_container(id)
- err = exc.exception.explanation
- self.assertIn(
- 'You cannot remove ', err
- )
+ err = exc.value.explanation
+ assert 'You cannot remove ' in err
self.client.remove_container(id, force=True)
def test_create_container_with_volumes_from(self):
@@ -142,23 +146,19 @@ class CreateContainerTest(BaseAPIIntegrationTest):
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
- with self.assertRaises(docker.errors.DockerException):
- self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True,
- volumes_from=vol_names
- )
- res2 = self.client.create_container(
+
+ res = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
)
- container3_id = res2['Id']
+ container3_id = res['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
- info = self.client.inspect_container(res2['Id'])
- self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
+ info = self.client.inspect_container(res['Id'])
+ assert len(info['HostConfig']['VolumesFrom']) == len(vol_names)
def create_container_readonly_fs(self):
ctnr = self.client.create_container(
@@ -167,19 +167,19 @@ class CreateContainerTest(BaseAPIIntegrationTest):
read_only=True, network_mode='none'
)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
- res = self.client.wait(ctnr)
- self.assertNotEqual(res, 0)
+ res = self.client.wait(ctnr)['StatusCode']
+ assert res != 0
def create_container_with_name(self):
res = self.client.create_container(BUSYBOX, 'true', name='foobar')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Name', inspect)
- self.assertEqual('/foobar', inspect['Name'])
+ assert 'Name' in inspect
+ assert '/foobar' == inspect['Name']
def create_container_privileged(self):
res = self.client.create_container(
@@ -187,24 +187,24 @@ class CreateContainerTest(BaseAPIIntegrationTest):
privileged=True, network_mode='none'
)
)
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Config', inspect)
- self.assertIn('Id', inspect)
- self.assertTrue(inspect['Id'].startswith(res['Id']))
- self.assertIn('Image', inspect)
- self.assertIn('State', inspect)
- self.assertIn('Running', inspect['State'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
if not inspect['State']['Running']:
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], 0)
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
# Since Nov 2013, the Privileged flag is no longer part of the
# container's config exposed via the API (safety concerns?).
#
if 'Privileged' in inspect['Config']:
- self.assertEqual(inspect['Config']['Privileged'], True)
+ assert inspect['Config']['Privileged'] is True
def test_create_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
@@ -215,12 +215,10 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
res = self.client.inspect_container(container['Id'])
- self.assertEqual(mac_address_expected,
- res['NetworkSettings']['MacAddress'])
+ assert mac_address_expected == res['NetworkSettings']['MacAddress']
self.client.kill(id)
- @requires_api_version('1.20')
def test_group_id_ints(self):
container = self.client.create_container(
BUSYBOX, 'id -G',
@@ -234,10 +232,9 @@ class CreateContainerTest(BaseAPIIntegrationTest):
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
- self.assertIn('1000', groups)
- self.assertIn('1001', groups)
+ assert '1000' in groups
+ assert '1001' in groups
- @requires_api_version('1.20')
def test_group_id_strings(self):
container = self.client.create_container(
BUSYBOX, 'id -G', host_config=self.client.create_host_config(
@@ -253,8 +250,8 @@ class CreateContainerTest(BaseAPIIntegrationTest):
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
- self.assertIn('1000', groups)
- self.assertIn('1001', groups)
+ assert '1000' in groups
+ assert '1001' in groups
def test_valid_log_driver_and_log_opt(self):
log_config = docker.types.LogConfig(
@@ -272,8 +269,8 @@ class CreateContainerTest(BaseAPIIntegrationTest):
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
- self.assertEqual(container_log_config['Type'], log_config.type)
- self.assertEqual(container_log_config['Config'], log_config.config)
+ assert container_log_config['Type'] == log_config.type
+ assert container_log_config['Config'] == log_config.config
def test_invalid_log_driver_raises_exception(self):
log_config = docker.types.LogConfig(
@@ -309,8 +306,8 @@ class CreateContainerTest(BaseAPIIntegrationTest):
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
- self.assertEqual(container_log_config['Type'], "json-file")
- self.assertEqual(container_log_config['Config'], log_config.config)
+ assert container_log_config['Type'] == "json-file"
+ assert container_log_config['Config'] == log_config.config
def test_valid_no_config_specified(self):
log_config = docker.types.LogConfig(
@@ -328,8 +325,8 @@ class CreateContainerTest(BaseAPIIntegrationTest):
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
- self.assertEqual(container_log_config['Type'], "json-file")
- self.assertEqual(container_log_config['Config'], {})
+ assert container_log_config['Type'] == "json-file"
+ assert container_log_config['Config'] == {}
def test_create_with_memory_constraints_with_str(self):
ctnr = self.client.create_container(
@@ -339,29 +336,29 @@ class CreateContainerTest(BaseAPIIntegrationTest):
mem_limit='700M'
)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
- self.assertIn('HostConfig', inspect)
+ assert 'HostConfig' in inspect
host_config = inspect['HostConfig']
for limit in ['Memory', 'MemorySwap']:
- self.assertIn(limit, host_config)
+ assert limit in host_config
def test_create_with_memory_constraints_with_int(self):
ctnr = self.client.create_container(
BUSYBOX, 'true',
host_config=self.client.create_host_config(mem_swappiness=40)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
- self.assertIn('HostConfig', inspect)
+ assert 'HostConfig' in inspect
host_config = inspect['HostConfig']
- self.assertIn('MemorySwappiness', host_config)
+ assert 'MemorySwappiness' in host_config
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
@@ -464,6 +461,35 @@ class CreateContainerTest(BaseAPIIntegrationTest):
config = self.client.inspect_container(ctnr)
assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu.rt_runtime_us'),
+ reason='CONFIG_RT_GROUP_SCHED isn\'t enabled')
+ def test_create_with_cpu_rt_options(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true', host_config=self.client.create_host_config(
+ cpu_rt_period=1000, cpu_rt_runtime=500
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['CpuRealtimeRuntime'] == 500
+ assert config['HostConfig']['CpuRealtimePeriod'] == 1000
+
+ @requires_api_version('1.28')
+ def test_create_with_device_cgroup_rules(self):
+ rule = 'c 7:128 rwm'
+ ctnr = self.client.create_container(
+ BUSYBOX, 'cat /sys/fs/cgroup/devices/devices.list',
+ host_config=self.client.create_host_config(
+ device_cgroup_rules=[rule]
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['DeviceCgroupRules'] == [rule]
+ self.client.start(ctnr)
+ assert rule in self.client.logs(ctnr).decode('utf-8')
+
class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
@@ -495,7 +521,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
if six.PY3:
logs = logs.decode('utf-8')
- self.assertIn(self.filename, logs)
+ assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@@ -517,30 +543,86 @@ class VolumeBindTest(BaseAPIIntegrationTest):
if six.PY3:
logs = logs.decode('utf-8')
- self.assertIn(self.filename, logs)
+ assert self.filename in logs
+
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, False)
+
+ @pytest.mark.xfail(
+ IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
+ )
+ @requires_api_version('1.30')
+ def test_create_with_mounts(self):
+ mount = docker.types.Mount(
+ type="bind", source=self.mount_origin, target=self.mount_dest
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.run_container(
+ BUSYBOX, ['ls', self.mount_dest],
+ host_config=host_config
+ )
+ assert container
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, True)
+ @pytest.mark.xfail(
+ IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
+ )
+ @requires_api_version('1.30')
+ def test_create_with_mounts_ro(self):
+ mount = docker.types.Mount(
+ type="bind", source=self.mount_origin, target=self.mount_dest,
+ read_only=True
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.run_container(
+ BUSYBOX, ['ls', self.mount_dest],
+ host_config=host_config
+ )
+ assert container
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
+ @requires_api_version('1.30')
+ def test_create_with_volume_mount(self):
+ mount = docker.types.Mount(
+ type="volume", source=helpers.random_name(),
+ target=self.mount_dest, labels={'com.dockerpy.test': 'true'}
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.client.create_container(
+ BUSYBOX, ['true'], host_config=host_config,
+ )
+ assert container
+ inspect_data = self.client.inspect_container(container)
+ assert 'Mounts' in inspect_data
+ filtered = list(filter(
+ lambda x: x['Destination'] == self.mount_dest,
+ inspect_data['Mounts']
+ ))
+ assert len(filtered) == 1
+ mount_data = filtered[0]
+ assert mount['Source'] == mount_data['Name']
+ assert mount_data['RW'] is True
+
def check_container_data(self, inspect_data, rw):
- if docker.utils.compare_version('1.20', self.client._version) < 0:
- self.assertIn('Volumes', inspect_data)
- self.assertIn(self.mount_dest, inspect_data['Volumes'])
- self.assertEqual(
- self.mount_origin, inspect_data['Volumes'][self.mount_dest]
- )
- self.assertIn(self.mount_dest, inspect_data['VolumesRW'])
- self.assertFalse(inspect_data['VolumesRW'][self.mount_dest])
- else:
- self.assertIn('Mounts', inspect_data)
- filtered = list(filter(
- lambda x: x['Destination'] == self.mount_dest,
- inspect_data['Mounts']
- ))
- self.assertEqual(len(filtered), 1)
- mount_data = filtered[0]
- self.assertEqual(mount_data['Source'], self.mount_origin)
- self.assertEqual(mount_data['RW'], rw)
+ assert 'Mounts' in inspect_data
+ filtered = list(filter(
+ lambda x: x['Destination'] == self.mount_dest,
+ inspect_data['Mounts']
+ ))
+ assert len(filtered) == 1
+ mount_data = filtered[0]
+ assert mount_data['Source'] == self.mount_origin
+ assert mount_data['RW'] == rw
def run_with_volume(self, ro, *args, **kwargs):
return self.run_container(
@@ -559,7 +641,6 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
-@requires_api_version('1.20')
class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
@@ -578,7 +659,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
retrieved_data = helpers.untar_file(destination, 'data.txt')
if six.PY3:
retrieved_data = retrieved_data.decode('utf-8')
- self.assertEqual(data, retrieved_data.strip())
+ assert data == retrieved_data.strip()
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
@@ -590,10 +671,10 @@ class ArchiveTest(BaseAPIIntegrationTest):
self.client.start(ctnr)
self.client.wait(ctnr)
strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
- self.assertIn('name', stat)
- self.assertEqual(stat['name'], 'data.txt')
- self.assertIn('size', stat)
- self.assertEqual(stat['size'], len(data))
+ assert 'name' in stat
+ assert stat['name'] == 'data.txt'
+ assert 'size' in stat
+ assert stat['size'] == len(data)
def test_copy_file_to_container(self):
data = b'Deaf To All But The Song'
@@ -616,7 +697,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
if six.PY3:
logs = logs.decode('utf-8')
data = data.decode('utf-8')
- self.assertEqual(logs.strip(), data)
+ assert logs.strip() == data
def test_copy_directory_to_container(self):
files = ['a.py', 'b.py', 'foo/b.py']
@@ -634,10 +715,10 @@ class ArchiveTest(BaseAPIIntegrationTest):
if six.PY3:
logs = logs.decode('utf-8')
results = logs.strip().split()
- self.assertIn('a.py', results)
- self.assertIn('b.py', results)
- self.assertIn('foo/', results)
- self.assertIn('bar/', results)
+ assert 'a.py' in results
+ assert 'b.py' in results
+ assert 'foo/' in results
+ assert 'bar/' in results
class RenameContainerTest(BaseAPIIntegrationTest):
@@ -645,49 +726,49 @@ class RenameContainerTest(BaseAPIIntegrationTest):
version = self.client.version()['Version']
name = 'hong_meiling'
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Name', inspect)
+ assert 'Name' in inspect
if version == '1.5.0':
- self.assertEqual(name, inspect['Name'])
+ assert name == inspect['Name']
else:
- self.assertEqual('/{0}'.format(name), inspect['Name'])
+ assert '/{0}'.format(name) == inspect['Name']
class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Config', inspect)
- self.assertIn('Id', inspect)
- self.assertTrue(inspect['Id'].startswith(res['Id']))
- self.assertIn('Image', inspect)
- self.assertIn('State', inspect)
- self.assertIn('Running', inspect['State'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
if not inspect['State']['Running']:
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], 0)
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
def test_start_container_with_dict_instead_of_id(self):
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res)
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Config', inspect)
- self.assertIn('Id', inspect)
- self.assertTrue(inspect['Id'].startswith(res['Id']))
- self.assertIn('Image', inspect)
- self.assertIn('State', inspect)
- self.assertIn('Running', inspect['State'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
if not inspect['State']['Running']:
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], 0)
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
def test_run_shlex_commands(self):
commands = [
@@ -706,8 +787,8 @@ class StartContainerTest(BaseAPIIntegrationTest):
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0, msg=cmd)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0, cmd
class WaitTest(BaseAPIIntegrationTest):
@@ -716,26 +797,43 @@ class WaitTest(BaseAPIIntegrationTest):
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
inspect = self.client.inspect_container(id)
- self.assertIn('Running', inspect['State'])
- self.assertEqual(inspect['State']['Running'], False)
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], exitcode)
+ assert 'Running' in inspect['State']
+ assert inspect['State']['Running'] is False
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == exitcode
def test_wait_with_dict_instead_of_id(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
- exitcode = self.client.wait(res)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(res)['StatusCode']
+ assert exitcode == 0
inspect = self.client.inspect_container(res)
- self.assertIn('Running', inspect['State'])
- self.assertEqual(inspect['State']['Running'], False)
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], exitcode)
+ assert 'Running' in inspect['State']
+ assert inspect['State']['Running'] is False
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == exitcode
+
+ @requires_api_version('1.30')
+ def test_wait_with_condition(self):
+ ctnr = self.client.create_container(BUSYBOX, 'true')
+ self.tmp_containers.append(ctnr)
+ with pytest.raises(requests.exceptions.ConnectionError):
+ self.client.wait(ctnr, condition='removed', timeout=1)
+
+ ctnr = self.client.create_container(
+ BUSYBOX, ['sleep', '3'],
+ host_config=self.client.create_host_config(auto_remove=True)
+ )
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ assert self.client.wait(
+ ctnr, condition='removed', timeout=5
+ )['StatusCode'] == 0
class LogsTest(BaseAPIIntegrationTest):
@@ -747,10 +845,10 @@ class LogsTest(BaseAPIIntegrationTest):
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(id)
- self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
+ assert logs == (snippet + '\n').encode(encoding='ascii')
def test_logs_tail_option(self):
snippet = '''Line1
@@ -761,10 +859,10 @@ Line2'''
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(id, tail=1)
- self.assertEqual(logs, 'Line2\n'.encode(encoding='ascii'))
+ assert logs == 'Line2\n'.encode(encoding='ascii')
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
@@ -778,10 +876,29 @@ Line2'''
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
- self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
+ assert logs == (snippet + '\n').encode(encoding='ascii')
+
+ @pytest.mark.timeout(5)
+ def test_logs_streaming_and_follow_and_cancel(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ logs = six.binary_type()
+
+ generator = self.client.logs(id, stream=True, follow=True)
+ threading.Timer(1, generator.close).start()
+
+ for chunk in generator:
+ logs += chunk
+
+ assert logs == (snippet + '\n').encode(encoding='ascii')
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
@@ -791,10 +908,10 @@ Line2'''
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(container)
- self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
+ assert logs == (snippet + '\n').encode(encoding='ascii')
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
@@ -804,10 +921,26 @@ Line2'''
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(id, tail=0)
- self.assertEqual(logs, ''.encode(encoding='ascii'))
+ assert logs == ''.encode(encoding='ascii')
+
+ @requires_api_version('1.35')
+ def test_logs_with_until(self):
+ snippet = 'Shanghai Teahouse (Hong Meiling)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo "{0}"'.format(snippet)
+ )
+
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+ assert exitcode == 0
+ logs_until_1 = self.client.logs(container, until=1)
+ assert logs_until_1 == b''
+ logs_until_now = self.client.logs(container, datetime.now())
+ assert logs_until_now == (snippet + '\n').encode(encoding='ascii')
class DiffTest(BaseAPIIntegrationTest):
@@ -816,26 +949,26 @@ class DiffTest(BaseAPIIntegrationTest):
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
diff = self.client.diff(id)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
- self.assertEqual(len(test_diff), 1)
- self.assertIn('Kind', test_diff[0])
- self.assertEqual(test_diff[0]['Kind'], 1)
+ assert len(test_diff) == 1
+ assert 'Kind' in test_diff[0]
+ assert test_diff[0]['Kind'] == 1
def test_diff_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
diff = self.client.diff(container)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
- self.assertEqual(len(test_diff), 1)
- self.assertIn('Kind', test_diff[0])
- self.assertEqual(test_diff[0]['Kind'], 1)
+ assert len(test_diff) == 1
+ assert 'Kind' in test_diff[0]
+ assert test_diff[0]['Kind'] == 1
class StopTest(BaseAPIIntegrationTest):
@@ -846,23 +979,23 @@ class StopTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
self.client.stop(id, timeout=2)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'Running' in state
+ assert state['Running'] is False
def test_stop_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
- self.assertIn('Id', container)
+ assert 'Id' in container
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
self.client.stop(container, timeout=2)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'Running' in state
+ assert state['Running'] is False
class KillTest(BaseAPIIntegrationTest):
@@ -873,12 +1006,12 @@ class KillTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False
def test_kill_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
@@ -887,12 +1020,12 @@ class KillTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False
def test_kill_with_signal(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
@@ -901,45 +1034,45 @@ class KillTest(BaseAPIIntegrationTest):
self.client.kill(
id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9
)
- exitcode = self.client.wait(id)
- self.assertNotEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False, state)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
def test_kill_with_signal_name(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal='SIGKILL')
- exitcode = self.client.wait(id)
- self.assertNotEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False, state)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
def test_kill_with_signal_integer(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=9)
- exitcode = self.client.wait(id)
- self.assertNotEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False, state)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
class PortTest(BaseAPIIntegrationTest):
@@ -967,8 +1100,8 @@ class PortTest(BaseAPIIntegrationTest):
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
- self.assertEqual(ip, port_bindings[port][0])
- self.assertEqual(host_port, port_bindings[port][1])
+ assert ip == port_bindings[port][0]
+ assert host_port == port_bindings[port][1]
self.client.kill(id)
@@ -1004,13 +1137,12 @@ class ContainerTopTest(BaseAPIIntegrationTest):
self.client.start(container)
res = self.client.top(container, 'waux')
- self.assertEqual(
- res['Titles'],
- ['USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
- 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'],
- )
- self.assertEqual(len(res['Processes']), 1)
- self.assertEqual(res['Processes'][0][10], 'sleep 60')
+ assert res['Titles'] == [
+ 'USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
+ 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'
+ ]
+ assert len(res['Processes']) == 1
+ assert res['Processes'][0][10] == 'sleep 60'
class RestartContainerTest(BaseAPIIntegrationTest):
@@ -1020,37 +1152,37 @@ class RestartContainerTest(BaseAPIIntegrationTest):
self.client.start(id)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
- self.assertIn('State', info)
- self.assertIn('StartedAt', info['State'])
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
start_time1 = info['State']['StartedAt']
self.client.restart(id, timeout=2)
info2 = self.client.inspect_container(id)
- self.assertIn('State', info2)
- self.assertIn('StartedAt', info2['State'])
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
start_time2 = info2['State']['StartedAt']
- self.assertNotEqual(start_time1, start_time2)
- self.assertIn('Running', info2['State'])
- self.assertEqual(info2['State']['Running'], True)
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
self.client.kill(id)
def test_restart_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
- self.assertIn('Id', container)
+ assert 'Id' in container
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
- self.assertIn('State', info)
- self.assertIn('StartedAt', info['State'])
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
start_time1 = info['State']['StartedAt']
self.client.restart(container, timeout=2)
info2 = self.client.inspect_container(id)
- self.assertIn('State', info2)
- self.assertIn('StartedAt', info2['State'])
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
start_time2 = info2['State']['StartedAt']
- self.assertNotEqual(start_time1, start_time2)
- self.assertIn('Running', info2['State'])
- self.assertEqual(info2['State']['Running'], True)
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
self.client.kill(id)
@@ -1063,7 +1195,7 @@ class RemoveContainerTest(BaseAPIIntegrationTest):
self.client.remove_container(id)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
- self.assertEqual(len(res), 0)
+ assert len(res) == 0
def test_remove_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['true'])
@@ -1073,7 +1205,7 @@ class RemoveContainerTest(BaseAPIIntegrationTest):
self.client.remove_container(container)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
- self.assertEqual(len(res), 0)
+ assert len(res) == 0
class AttachContainerTest(BaseAPIIntegrationTest):
@@ -1084,7 +1216,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
self.client.start(id)
sock = self.client.attach_socket(container, ws=False)
- self.assertTrue(sock.fileno() > -1)
+ assert sock.fileno() > -1
def test_run_container_reading_socket(self):
line = 'hi there and stuff and things, words!'
@@ -1101,9 +1233,9 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
next_size = next_frame_size(pty_stdout)
- self.assertEqual(next_size, len(line))
+ assert next_size == len(line)
data = read_exactly(pty_stdout, next_size)
- self.assertEqual(data.decode('utf-8'), line)
+ assert data.decode('utf-8') == line
def test_attach_no_stream(self):
container = self.client.create_container(
@@ -1114,6 +1246,76 @@ class AttachContainerTest(BaseAPIIntegrationTest):
output = self.client.attach(container, stream=False, logs=True)
assert output == 'hello\n'.encode(encoding='ascii')
+ @pytest.mark.timeout(5)
+ def test_attach_stream_and_cancel(self):
+ container = self.client.create_container(
+ BUSYBOX, 'sh -c "echo hello && sleep 60"',
+ tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ output = self.client.attach(container, stream=True, logs=True)
+
+ threading.Timer(1, output.close).start()
+
+ lines = []
+ for line in output:
+ lines.append(line)
+
+ assert len(lines) == 1
+ assert lines[0] == 'hello\r\n'.encode(encoding='ascii')
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True}
+ )
+
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True}
+ )
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+ def test_detach_with_arg(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True, 'detachKeys': 'ctrl-x'}
+ )
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
+
class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
@@ -1123,25 +1325,25 @@ class PauseTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.pause(id)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], True)
- self.assertIn('Paused', state)
- self.assertEqual(state['Paused'], True)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] == 0
+ assert 'Running' in state
+ assert state['Running'] is True
+ assert 'Paused' in state
+ assert state['Paused'] is True
self.client.unpause(id)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], True)
- self.assertIn('Paused', state)
- self.assertEqual(state['Paused'], False)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] == 0
+ assert 'Running' in state
+ assert state['Running'] is True
+ assert 'Paused' in state
+ assert state['Paused'] is False
class PruneTest(BaseAPIIntegrationTest):
@@ -1161,7 +1363,6 @@ class PruneTest(BaseAPIIntegrationTest):
class GetContainerStatsTest(BaseAPIIntegrationTest):
- @requires_api_version('1.19')
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
@@ -1171,12 +1372,11 @@ class GetContainerStatsTest(BaseAPIIntegrationTest):
response = self.client.stats(container, stream=0)
self.client.kill(container)
- self.assertEqual(type(response), dict)
+ assert type(response) == dict
for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
- self.assertIn(key, response)
+ assert key in response
- @requires_api_version('1.17')
def test_get_container_stats_stream(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
@@ -1185,10 +1385,10 @@ class GetContainerStatsTest(BaseAPIIntegrationTest):
self.client.start(container)
stream = self.client.stats(container)
for chunk in stream:
- self.assertEqual(type(chunk), dict)
+ assert type(chunk) == dict
for key in ['read', 'network', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
- self.assertIn(key, chunk)
+ assert key in chunk
class ContainerUpdateTest(BaseAPIIntegrationTest):
@@ -1205,7 +1405,7 @@ class ContainerUpdateTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.update_container(container, mem_limit=new_mem_limit)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)
+ assert inspect_data['HostConfig']['Memory'] == new_mem_limit
@requires_api_version('1.23')
def test_restart_policy_update(self):
@@ -1228,18 +1428,17 @@ class ContainerUpdateTest(BaseAPIIntegrationTest):
self.client.update_container(container,
restart_policy=new_restart_policy)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(
- inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'],
+ assert (
+ inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'] ==
new_restart_policy['MaximumRetryCount']
)
- self.assertEqual(
- inspect_data['HostConfig']['RestartPolicy']['Name'],
+ assert (
+ inspect_data['HostConfig']['RestartPolicy']['Name'] ==
new_restart_policy['Name']
)
class ContainerCPUTest(BaseAPIIntegrationTest):
- @requires_api_version('1.18')
def test_container_cpu_shares(self):
cpu_shares = 512
container = self.client.create_container(
@@ -1250,9 +1449,8 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512)
+ assert inspect_data['HostConfig']['CpuShares'] == 512
- @requires_api_version('1.18')
def test_container_cpuset(self):
cpuset_cpus = "0,1"
container = self.client.create_container(
@@ -1263,7 +1461,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)
+ assert inspect_data['HostConfig']['CpusetCpus'] == cpuset_cpus
@requires_api_version('1.25')
def test_create_with_runtime(self):
@@ -1307,11 +1505,11 @@ class LinkTest(BaseAPIIntegrationTest):
# Link is gone
containers = self.client.containers(all=True)
retrieved = [x for x in containers if link_name in x['Names']]
- self.assertEqual(len(retrieved), 0)
+ assert len(retrieved) == 0
# Containers are still there
retrieved = [
x for x in containers if x['Id'].startswith(container1_id) or
x['Id'].startswith(container2_id)
]
- self.assertEqual(len(retrieved), 2)
+ assert len(retrieved) == 2
diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py
index 7a65041..1a5a4e5 100644
--- a/tests/integration/api_exec_test.py
+++ b/tests/integration/api_exec_test.py
@@ -2,7 +2,9 @@ from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
from .base import BaseAPIIntegrationTest, BUSYBOX
-from ..helpers import requires_api_version
+from ..helpers import (
+ requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
+)
class ExecTest(BaseAPIIntegrationTest):
@@ -14,10 +16,10 @@ class ExecTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
res = self.client.exec_create(id, ['echo', 'hello'])
- self.assertIn('Id', res)
+ assert 'Id' in res
exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'hello\n')
+ assert exec_log == b'hello\n'
def test_exec_command_string(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -27,10 +29,10 @@ class ExecTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'echo hello world')
- self.assertIn('Id', res)
+ assert 'Id' in res
exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'hello world\n')
+ assert exec_log == b'hello world\n'
def test_exec_command_as_user(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -40,10 +42,10 @@ class ExecTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami', user='default')
- self.assertIn('Id', res)
+ assert 'Id' in res
exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'default\n')
+ assert exec_log == b'default\n'
def test_exec_command_as_root(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -53,10 +55,10 @@ class ExecTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami')
- self.assertIn('Id', res)
+ assert 'Id' in res
exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'root\n')
+ assert exec_log == b'root\n'
def test_exec_command_streaming(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -66,12 +68,12 @@ class ExecTest(BaseAPIIntegrationTest):
self.client.start(id)
exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
- self.assertIn('Id', exec_id)
+ assert 'Id' in exec_id
res = b''
for chunk in self.client.exec_start(exec_id, stream=True):
res += chunk
- self.assertEqual(res, b'hello\nworld\n')
+ assert res == b'hello\nworld\n'
def test_exec_start_socket(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -84,15 +86,15 @@ class ExecTest(BaseAPIIntegrationTest):
# `echo` appends CRLF, `printf` doesn't
exec_id = self.client.exec_create(
container_id, ['printf', line], tty=True)
- self.assertIn('Id', exec_id)
+ assert 'Id' in exec_id
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
next_size = next_frame_size(socket)
- self.assertEqual(next_size, len(line))
+ assert next_size == len(line)
data = read_exactly(socket, next_size)
- self.assertEqual(data.decode('utf-8'), line)
+ assert data.decode('utf-8') == line
def test_exec_start_detached(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -103,11 +105,11 @@ class ExecTest(BaseAPIIntegrationTest):
exec_id = self.client.exec_create(
container_id, ['printf', "asdqwe"])
- self.assertIn('Id', exec_id)
+ assert 'Id' in exec_id
response = self.client.exec_start(exec_id, detach=True)
- self.assertEqual(response, "")
+ assert response == ""
def test_exec_inspect(self):
container = self.client.create_container(BUSYBOX, 'cat',
@@ -117,11 +119,11 @@ class ExecTest(BaseAPIIntegrationTest):
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
- self.assertIn('Id', exec_id)
+ assert 'Id' in exec_id
self.client.exec_start(exec_id)
exec_info = self.client.exec_inspect(exec_id)
- self.assertIn('ExitCode', exec_info)
- self.assertNotEqual(exec_info['ExitCode'], 0)
+ assert 'ExitCode' in exec_info
+ assert exec_info['ExitCode'] != 0
@requires_api_version('1.25')
def test_exec_command_with_env(self):
@@ -136,3 +138,68 @@ class ExecTest(BaseAPIIntegrationTest):
exec_log = self.client.exec_start(res)
assert b'X=Y\n' in exec_log
+
+ @requires_api_version('1.35')
+ def test_exec_command_with_workdir(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ res = self.client.exec_create(container, 'pwd', workdir='/var/www')
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'/var/www\n'
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+ def test_detach_with_arg(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat',
+ stdin=True, tty=True, detach_keys='ctrl-x', stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
index 211042d..5dbac37 100644
--- a/tests/integration/api_healthcheck_test.py
+++ b/tests/integration/api_healthcheck_test.py
@@ -20,8 +20,9 @@ class HealthcheckTest(BaseAPIIntegrationTest):
self.tmp_containers.append(container)
res = self.client.inspect_container(container)
- assert res['Config']['Healthcheck']['Test'] == \
- ['CMD-SHELL', 'echo "hello world"']
+ assert res['Config']['Healthcheck']['Test'] == [
+ 'CMD-SHELL', 'echo "hello world"'
+ ]
@helpers.requires_api_version('1.24')
def test_healthcheck_passes(self):
diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py
index 14fb77a..050e7f3 100644
--- a/tests/integration/api_image_test.py
+++ b/tests/integration/api_image_test.py
@@ -14,26 +14,26 @@ from six.moves import socketserver
import docker
-from ..helpers import requires_api_version
+from ..helpers import requires_api_version, requires_experimental
from .base import BaseAPIIntegrationTest, BUSYBOX
class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
- self.assertIn('Id', res1[0])
+ assert 'Id' in res1[0]
res10 = res1[0]
- self.assertIn('Created', res10)
- self.assertIn('RepoTags', res10)
+ assert 'Created' in res10
+ assert 'RepoTags' in res10
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
- self.assertEqual(len(distinct), self.client.info()['Images'])
+ assert len(distinct) == self.client.info()['Images']
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
- self.assertEqual(type(res1[0]), six.text_type)
+ assert type(res1[0]) == six.text_type
class PullImageTest(BaseAPIIntegrationTest):
@@ -44,12 +44,10 @@ class PullImageTest(BaseAPIIntegrationTest):
pass
res = self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
- self.assertEqual(type(res), six.text_type)
- self.assertGreaterEqual(
- len(self.client.images('hello-world')), 1
- )
+ assert type(res) == six.text_type
+ assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
- self.assertIn('Id', img_info)
+ assert 'Id' in img_info
def test_pull_streaming(self):
try:
@@ -61,11 +59,18 @@ class PullImageTest(BaseAPIIntegrationTest):
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
- self.assertGreaterEqual(
- len(self.client.images('hello-world')), 1
- )
+ assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
- self.assertIn('Id', img_info)
+ assert 'Id' in img_info
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_pull_invalid_platform(self):
+ with pytest.raises(docker.errors.APIError) as excinfo:
+ self.client.pull('hello-world', platform='foobar')
+
+ assert excinfo.value.status_code == 500
+ assert 'invalid platform' in excinfo.exconly()
class CommitTest(BaseAPIIntegrationTest):
@@ -75,18 +80,18 @@ class CommitTest(BaseAPIIntegrationTest):
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
- self.assertIn('Id', res)
+ assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
- self.assertIn('Container', img)
- self.assertTrue(img['Container'].startswith(id))
- self.assertIn('ContainerConfig', img)
- self.assertIn('Image', img['ContainerConfig'])
- self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
+ assert 'Container' in img
+ assert img['Container'].startswith(id)
+ assert 'ContainerConfig' in img
+ assert 'Image' in img['ContainerConfig']
+ assert BUSYBOX == img['ContainerConfig']['Image']
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
- self.assertIn('Parent', img)
- self.assertEqual(img['Parent'], busybox_id)
+ assert 'Parent' in img
+ assert img['Parent'] == busybox_id
def test_commit_with_changes(self):
cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
@@ -110,14 +115,14 @@ class RemoveImageTest(BaseAPIIntegrationTest):
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
- self.assertIn('Id', res)
+ assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
logs = self.client.remove_image(img_id, force=True)
- self.assertIn({"Deleted": img_id}, logs)
+ assert {"Deleted": img_id} in logs
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
- self.assertEqual(len(res), 0)
+ assert len(res) == 0
class ImportImageTest(BaseAPIIntegrationTest):
@@ -171,7 +176,7 @@ class ImportImageTest(BaseAPIIntegrationTest):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -186,9 +191,9 @@ class ImportImageTest(BaseAPIIntegrationTest):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
- self.assertIn('status', result)
+ assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -201,9 +206,9 @@ class ImportImageTest(BaseAPIIntegrationTest):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
- self.assertIn('status', result)
+ assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -296,9 +301,9 @@ class ImportImageTest(BaseAPIIntegrationTest):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
- self.assertIn('status', result)
+ assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -320,7 +325,7 @@ class PruneImagesTest(BaseAPIIntegrationTest):
img_id = self.client.inspect_image('hello-world')['Id']
result = self.client.prune_images()
assert img_id not in [
- img.get('Deleted') for img in result['ImagesDeleted']
+ img.get('Deleted') for img in result.get('ImagesDeleted') or []
]
result = self.client.prune_images({'dangling': False})
assert result['SpaceReclaimed'] > 0
@@ -330,3 +335,34 @@ class PruneImagesTest(BaseAPIIntegrationTest):
assert img_id in [
img.get('Deleted') for img in result['ImagesDeleted']
]
+
+
+class SaveLoadImagesTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.23')
+ def test_get_image_load_image(self):
+ with tempfile.TemporaryFile() as f:
+ stream = self.client.get_image(BUSYBOX)
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ result = self.client.load_image(f.read())
+
+ success = False
+ result_line = 'Loaded image: {}\n'.format(BUSYBOX)
+ for data in result:
+ print(data)
+ if 'stream' in data:
+ if data['stream'] == result_line:
+ success = True
+ break
+ assert success is True
+
+
+@requires_api_version('1.30')
+class InspectDistributionTest(BaseAPIIntegrationTest):
+ def test_inspect_distribution(self):
+ data = self.client.inspect_distribution('busybox:latest')
+ assert data is not None
+ assert 'Platforms' in data
+ assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py
index 5439dd7..b6726d0 100644
--- a/tests/integration/api_network_test.py
+++ b/tests/integration/api_network_test.py
@@ -17,33 +17,30 @@ class TestNetworks(BaseAPIIntegrationTest):
self.tmp_networks.append(net_id)
return (net_name, net_id)
- @requires_api_version('1.21')
def test_list_networks(self):
networks = self.client.networks()
net_name, net_id = self.create_network()
networks = self.client.networks()
- self.assertTrue(net_id in [n['Id'] for n in networks])
+ assert net_id in [n['Id'] for n in networks]
networks_by_name = self.client.networks(names=[net_name])
- self.assertEqual([n['Id'] for n in networks_by_name], [net_id])
+ assert [n['Id'] for n in networks_by_name] == [net_id]
networks_by_partial_id = self.client.networks(ids=[net_id[:8]])
- self.assertEqual([n['Id'] for n in networks_by_partial_id], [net_id])
+ assert [n['Id'] for n in networks_by_partial_id] == [net_id]
- @requires_api_version('1.21')
def test_inspect_network(self):
net_name, net_id = self.create_network()
net = self.client.inspect_network(net_id)
- self.assertEqual(net['Id'], net_id)
- self.assertEqual(net['Name'], net_name)
- self.assertEqual(net['Driver'], 'bridge')
- self.assertEqual(net['Scope'], 'local')
- self.assertEqual(net['IPAM']['Driver'], 'default')
+ assert net['Id'] == net_id
+ assert net['Name'] == net_name
+ assert net['Driver'] == 'bridge'
+ assert net['Scope'] == 'local'
+ assert net['IPAM']['Driver'] == 'default'
- @requires_api_version('1.21')
def test_create_network_with_ipam_config(self):
_, net_id = self.create_network(
ipam=IPAMConfig(
@@ -81,12 +78,10 @@ class TestNetworks(BaseAPIIntegrationTest):
},
}]
- @requires_api_version('1.21')
def test_create_network_with_host_driver_fails(self):
with pytest.raises(docker.errors.APIError):
self.client.create_network(random_name(), driver='host')
- @requires_api_version('1.21')
def test_remove_network(self):
net_name, net_id = self.create_network()
assert net_name in [n['Name'] for n in self.client.networks()]
@@ -94,7 +89,6 @@ class TestNetworks(BaseAPIIntegrationTest):
self.client.remove_network(net_id)
assert net_name not in [n['Name'] for n in self.client.networks()]
- @requires_api_version('1.21')
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
@@ -103,21 +97,20 @@ class TestNetworks(BaseAPIIntegrationTest):
self.client.start(container)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertEqual(
- list(network_data['Containers'].keys()),
- [container['Id']]
- )
+ assert list(network_data['Containers'].keys()) == [
+ container['Id']
+ ]
with pytest.raises(docker.errors.APIError):
self.client.connect_container_to_network(container, net_id)
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
with pytest.raises(docker.errors.APIError):
self.client.disconnect_container_from_network(container, net_id)
@@ -131,18 +124,16 @@ class TestNetworks(BaseAPIIntegrationTest):
self.client.start(container)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertEqual(
- list(network_data['Containers'].keys()),
+ assert list(network_data['Containers'].keys()) == \
[container['Id']]
- )
self.client.disconnect_container_from_network(container, net_id, True)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
with pytest.raises(docker.errors.APIError):
self.client.disconnect_container_from_network(
@@ -166,7 +157,6 @@ class TestNetworks(BaseAPIIntegrationTest):
assert 'foo' in aliases
assert 'bar' in aliases
- @requires_api_version('1.21')
def test_connect_on_container_create(self):
net_name, net_id = self.create_network()
@@ -179,13 +169,12 @@ class TestNetworks(BaseAPIIntegrationTest):
self.client.start(container)
network_data = self.client.inspect_network(net_id)
- self.assertEqual(
- list(network_data['Containers'].keys()),
- [container['Id']])
+ assert list(network_data['Containers'].keys()) == \
+ [container['Id']]
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
@requires_api_version('1.22')
def test_create_with_aliases(self):
@@ -233,14 +222,11 @@ class TestNetworks(BaseAPIIntegrationTest):
self.tmp_containers.append(container)
self.client.start(container)
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data[
- 'NetworkSettings']['Networks'][net_name]['IPAMConfig'][
- 'IPv4Address'
- ],
- '132.124.0.23'
- )
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['IPAMConfig']['IPv4Address']\
+ == '132.124.0.23'
@requires_api_version('1.22')
def test_create_with_ipv6_address(self):
@@ -262,14 +248,11 @@ class TestNetworks(BaseAPIIntegrationTest):
self.tmp_containers.append(container)
self.client.start(container)
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data[
- 'NetworkSettings']['Networks'][net_name]['IPAMConfig'][
- 'IPv6Address'
- ],
- '2001:389::f00d'
- )
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['IPAMConfig']['IPv6Address']\
+ == '2001:389::f00d'
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
@@ -305,10 +288,12 @@ class TestNetworks(BaseAPIIntegrationTest):
}),
)
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data['NetworkSettings']['Networks'][net_name]['Links'],
- ['docker-py-test-upstream:bar'])
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['Links'] == [
+ 'docker-py-test-upstream:bar'
+ ]
self.create_and_start(
name='docker-py-test-upstream',
@@ -317,10 +302,9 @@ class TestNetworks(BaseAPIIntegrationTest):
self.execute(container, ['nslookup', 'bar'])
- @requires_api_version('1.21')
def test_create_check_duplicate(self):
net_name, net_id = self.create_network()
- with self.assertRaises(docker.errors.APIError):
+ with pytest.raises(docker.errors.APIError):
self.client.create_network(net_name, check_duplicate=True)
net_id = self.client.create_network(net_name, check_duplicate=False)
self.tmp_networks.append(net_id['Id'])
@@ -337,10 +321,12 @@ class TestNetworks(BaseAPIIntegrationTest):
container, net_name,
links=[('docker-py-test-upstream', 'bar')])
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data['NetworkSettings']['Networks'][net_name]['Links'],
- ['docker-py-test-upstream:bar'])
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['Links'] == [
+ 'docker-py-test-upstream:bar'
+ ]
self.create_and_start(
name='docker-py-test-upstream',
@@ -373,9 +359,7 @@ class TestNetworks(BaseAPIIntegrationTest):
container_data = self.client.inspect_container(container)
net_data = container_data['NetworkSettings']['Networks'][net_name]
- self.assertEqual(
- net_data['IPAMConfig']['IPv4Address'], '172.28.5.24'
- )
+ assert net_data['IPAMConfig']['IPv4Address'] == '172.28.5.24'
@requires_api_version('1.22')
def test_connect_with_ipv6_address(self):
@@ -401,9 +385,7 @@ class TestNetworks(BaseAPIIntegrationTest):
container_data = self.client.inspect_container(container)
net_data = container_data['NetworkSettings']['Networks'][net_name]
- self.assertEqual(
- net_data['IPAMConfig']['IPv6Address'], '2001:389::f00d'
- )
+ assert net_data['IPAMConfig']['IPv6Address'] == '2001:389::f00d'
@requires_api_version('1.23')
def test_create_internal_networks(self):
@@ -447,14 +429,14 @@ class TestNetworks(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_network_attachable(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
_, net_id = self.create_network(driver='overlay', attachable=True)
net = self.client.inspect_network(net_id)
assert net['Attachable'] is True
@requires_api_version('1.29')
def test_create_network_ingress(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
self.client.remove_network('ingress')
_, net_id = self.create_network(driver='overlay', ingress=True)
net = self.client.inspect_network(net_id)
@@ -465,3 +447,28 @@ class TestNetworks(BaseAPIIntegrationTest):
net_name, _ = self.create_network()
result = self.client.prune_networks()
assert net_name in result['NetworksDeleted']
+
+ @requires_api_version('1.31')
+ def test_create_inspect_network_with_scope(self):
+ assert self.init_swarm()
+ net_name_loc, net_id_loc = self.create_network(scope='local')
+
+ assert self.client.inspect_network(net_name_loc)
+ assert self.client.inspect_network(net_name_loc, scope='local')
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_network(net_name_loc, scope='global')
+
+ net_name_swarm, net_id_swarm = self.create_network(
+ driver='overlay', scope='swarm'
+ )
+
+ assert self.client.inspect_network(net_name_swarm)
+ assert self.client.inspect_network(net_name_swarm, scope='swarm')
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_network(net_name_swarm, scope='local')
+
+ def test_create_remove_network_with_space_in_name(self):
+ net_id = self.client.create_network('test 01')
+ self.tmp_networks.append(net_id)
+ assert self.client.inspect_network('test 01')
+ assert self.client.remove_network('test 01') is None # does not raise
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
index dcd880f..b3d93b8 100644
--- a/tests/integration/api_secret_test.py
+++ b/tests/integration/api_secret_test.py
@@ -9,13 +9,16 @@ from .base import BaseAPIIntegrationTest
@requires_api_version('1.25')
class SecretAPITest(BaseAPIIntegrationTest):
- def setUp(self):
- super(SecretAPITest, self).setUp()
- self.init_swarm()
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
- def tearDown(self):
- super(SecretAPITest, self).tearDown()
- force_leave_swarm(self.client)
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
def test_create_secret(self):
secret_id = self.client.create_secret(
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
index c966916..85f9dcc 100644
--- a/tests/integration/api_service_test.py
+++ b/tests/integration/api_service_test.py
@@ -4,6 +4,7 @@ import random
import time
import docker
+import pytest
import six
from ..helpers import (
@@ -13,19 +14,24 @@ from .base import BaseAPIIntegrationTest, BUSYBOX
class ServiceTest(BaseAPIIntegrationTest):
- def setUp(self):
- super(ServiceTest, self).setUp()
- force_leave_swarm(self.client)
- self.init_swarm()
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
def tearDown(self):
- super(ServiceTest, self).tearDown()
for service in self.client.services(filters={'name': 'dockerpytest_'}):
try:
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
- force_leave_swarm(self.client)
+ super(ServiceTest, self).tearDown()
def get_service_name(self):
return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
@@ -47,7 +53,7 @@ class ServiceTest(BaseAPIIntegrationTest):
return None
time.sleep(interval)
- def create_simple_service(self, name=None):
+ def create_simple_service(self, name=None, labels=None):
if name:
name = 'dockerpytest_{0}'.format(name)
else:
@@ -57,7 +63,9 @@ class ServiceTest(BaseAPIIntegrationTest):
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
- return name, self.client.create_service(task_tmpl, name=name)
+ return name, self.client.create_service(
+ task_tmpl, name=name, labels=labels
+ )
@requires_api_version('1.24')
def test_list_services(self):
@@ -71,6 +79,15 @@ class ServiceTest(BaseAPIIntegrationTest):
assert len(test_services) == 1
assert 'dockerpytest_' in test_services[0]['Spec']['Name']
+ @requires_api_version('1.24')
+ def test_list_services_filter_by_label(self):
+ test_services = self.client.services(filters={'label': 'test_label'})
+ assert len(test_services) == 0
+ self.create_simple_service(labels={'test_label': 'testing'})
+ test_services = self.client.services(filters={'label': 'test_label'})
+ assert len(test_services) == 1
+ assert test_services[0]['Spec']['Labels']['test_label'] == 'testing'
+
def test_inspect_service_by_id(self):
svc_name, svc_id = self.create_simple_service()
svc_info = self.client.inspect_service(svc_id)
@@ -83,6 +100,17 @@ class ServiceTest(BaseAPIIntegrationTest):
assert 'ID' in svc_info
assert svc_info['ID'] == svc_id['ID']
+ @requires_api_version('1.29')
+ def test_inspect_service_insert_defaults(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ svc_info_defaults = self.client.inspect_service(
+ svc_id, insert_defaults=True
+ )
+ assert svc_info != svc_info_defaults
+ assert 'RollbackConfig' in svc_info_defaults['Spec']
+ assert 'RollbackConfig' not in svc_info['Spec']
+
def test_remove_service_by_id(self):
svc_name, svc_id = self.create_simple_service()
assert self.client.remove_service(svc_id)
@@ -185,6 +213,57 @@ class ServiceTest(BaseAPIIntegrationTest):
'Reservations'
]
+ def _create_service_with_generic_resources(self, generic_resources):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+
+ resources = docker.types.Resources(
+ generic_resources=generic_resources
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ return resources, self.client.inspect_service(svc_id)
+
+ @requires_api_version('1.32')
+ def test_create_service_with_generic_resources(self):
+ successful = [{
+ 'input': [
+ {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 1}},
+ {'NamedResourceSpec': {'Kind': 'gpu', 'Value': 'test'}}
+ ]}, {
+ 'input': {'gpu': 2, 'mpi': 'latest'},
+ 'expected': [
+ {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 2}},
+ {'NamedResourceSpec': {'Kind': 'mpi', 'Value': 'latest'}}
+ ]}
+ ]
+
+ for test in successful:
+ t = test['input']
+ resrcs, svc_info = self._create_service_with_generic_resources(t)
+
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ res_reservations = res_template['Resources']['Reservations']
+ assert res_reservations == resrcs['Reservations']
+ assert 'GenericResources' in res_reservations
+
+ def _key(d, specs=('DiscreteResourceSpec', 'NamedResourceSpec')):
+ return [d.get(s, {}).get('Kind', '') for s in specs]
+
+ actual = res_reservations['GenericResources']
+ expected = test.get('expected', test['input'])
+ assert sorted(actual, key=_key) == sorted(expected, key=_key)
+
+ @requires_api_version('1.32')
+ def test_create_service_with_invalid_generic_resources(self):
+ for test_input in ['1', 1.0, lambda: '1', {1, 2}]:
+ with pytest.raises(docker.errors.InvalidArgument):
+ self._create_service_with_generic_resources(test_input)
+
def test_create_service_with_update_config(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
@@ -326,7 +405,6 @@ class ServiceTest(BaseAPIIntegrationTest):
task_tmpl, name=name, endpoint_spec=endpoint_spec
)
svc_info = self.client.inspect_service(svc_id)
- print(svc_info)
ports = svc_info['Spec']['EndpointSpec']['Ports']
for port in ports:
if port['PublishedPort'] == 12562:
@@ -343,6 +421,26 @@ class ServiceTest(BaseAPIIntegrationTest):
assert len(ports) == 3
+ @requires_api_version('1.32')
+ def test_create_service_with_endpoint_spec_host_publish_mode(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, None, 'host'),
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ assert len(ports) == 1
+ port = ports[0]
+ assert port['PublishedPort'] == 12357
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'tcp'
+ assert port['PublishMode'] == 'host'
+
def test_create_service_with_env(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
@@ -359,6 +457,24 @@ class ServiceTest(BaseAPIIntegrationTest):
assert 'Env' in con_spec
assert con_spec['Env'] == ['DOCKER_PY_TEST=1']
+ @requires_api_version('1.29')
+ def test_create_service_with_update_order(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, order='start-first'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['Order'] == uc['Order']
+
@requires_api_version('1.25')
def test_create_service_with_tty(self):
container_spec = docker.types.ContainerSpec(
@@ -493,3 +609,647 @@ class ServiceTest(BaseAPIIntegrationTest):
container_secret = self.client.exec_start(exec_id)
container_secret = container_secret.decode('utf-8')
assert container_secret == secret_data
+
+ @requires_api_version('1.30')
+ def test_create_service_with_config(self):
+ config_name = 'favorite_touhou'
+ config_data = b'phantasmagoria of flower view'
+ config_id = self.client.create_config(config_name, config_data)
+ self.tmp_configs.append(config_id)
+ config_ref = docker.types.ConfigReference(config_id, config_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], configs=[config_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
+ assert configs[0] == config_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /{0}'.format(config_name)
+ )
+ assert self.client.exec_start(exec_id) == config_data
+
+ @requires_api_version('1.30')
+ def test_create_service_with_unicode_config(self):
+ config_name = 'favorite_touhou'
+ config_data = u'東方花映塚'
+ config_id = self.client.create_config(config_name, config_data)
+ self.tmp_configs.append(config_id)
+ config_ref = docker.types.ConfigReference(config_id, config_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], configs=[config_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
+ assert configs[0] == config_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /{0}'.format(config_name)
+ )
+ container_config = self.client.exec_start(exec_id)
+ container_config = container_config.decode('utf-8')
+ assert container_config == config_data
+
+ @requires_api_version('1.25')
+ def test_create_service_with_hosts(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], hosts={
+ 'foobar': '127.0.0.1',
+ 'baz': '8.8.8.8',
+ }
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Hosts' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ hosts = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hosts']
+ assert len(hosts) == 2
+ assert '127.0.0.1 foobar' in hosts
+ assert '8.8.8.8 baz' in hosts
+
+ @requires_api_version('1.25')
+ def test_create_service_with_hostname(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], hostname='foobar.baz.com'
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Hostname' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hostname'] ==
+ 'foobar.baz.com'
+ )
+
+ @requires_api_version('1.25')
+ def test_create_service_with_groups(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], groups=['shrinemaidens', 'youkais']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Groups' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ groups = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Groups']
+ assert len(groups) == 2
+ assert 'shrinemaidens' in groups
+ assert 'youkais' in groups
+
+ @requires_api_version('1.25')
+ def test_create_service_with_dns_config(self):
+ dns_config = docker.types.DNSConfig(
+ nameservers=['8.8.8.8', '8.8.4.4'],
+ search=['local'], options=['debug']
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], dns_config=dns_config
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'DNSConfig' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ dns_config ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig']
+ )
+
+ @requires_api_version('1.25')
+ def test_create_service_with_healthcheck(self):
+ second = 1000000000
+ hc = docker.types.Healthcheck(
+ test='true', retries=3, timeout=1 * second,
+ start_period=3 * second, interval=int(second / 2),
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck=hc
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ hc ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
+ )
+
+ @requires_api_version('1.28')
+ def test_create_service_with_readonly(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], read_only=True
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'ReadOnly' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert svc_info['Spec']['TaskTemplate']['ContainerSpec']['ReadOnly']
+
+ @requires_api_version('1.28')
+ def test_create_service_with_stop_signal(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], stop_signal='SIGINT'
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'StopSignal' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['StopSignal'] ==
+ 'SIGINT'
+ )
+
+ @requires_api_version('1.30')
+ def test_create_service_with_privileges(self):
+ priv = docker.types.Privileges(selinux_disable=True)
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], privileges=priv
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Privileges' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ privileges = (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Privileges']
+ )
+ assert privileges['SELinuxContext']['Disable'] is True
+
+ @requires_api_version('1.25')
+ def test_update_service_with_defaults_name(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Name' in svc_info['Spec']
+ assert svc_info['Spec']['Name'] == name
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Name' in svc_info['Spec']
+ assert svc_info['Spec']['Name'] == name
+
+ @requires_api_version('1.25')
+ def test_update_service_with_defaults_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, name=name,
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+
+ def test_update_service_with_defaults_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name,
+ mode=docker.types.ServiceMode(mode='replicated', replicas=2)
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
+ assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
+ assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
+
+ def test_update_service_with_defaults_container_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={'container.label': 'SampleLabel'}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, new_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ newer_index = svc_info['Version']['Index']
+ assert newer_index > new_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+
+ def test_update_service_with_defaults_update_config(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
+ def test_update_service_with_defaults_networks(self):
+ net1 = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[
+ 'dockerpytest_1', {'Target': 'dockerpytest_2'}
+ ]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert svc_info['Spec']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ self._update_service(
+ svc_id, name, new_index, networks=[net1['Id']],
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Networks'] == [
+ {'Target': net1['Id']}
+ ]
+
+ def test_update_service_with_defaults_endpoint_spec(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, 'udp'),
+ 12562: (678,),
+ 53243: 8080,
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ print(svc_info)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ @requires_api_version('1.25')
+ def test_update_service_remove_healthcheck(self):
+ second = 1000000000
+ hc = docker.types.Healthcheck(
+ test='true', retries=3, timeout=1 * second,
+ start_period=3 * second, interval=int(second / 2),
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck=hc
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ hc ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
+ )
+
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck={}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ 'Healthcheck' not in container_spec or
+ not container_spec['Healthcheck']
+ )
+
+ def test_update_service_remove_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={}, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert not svc_info['Spec'].get('Labels')
+
+ def test_update_service_remove_container_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={'container.label': 'SampleLabel'}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert not container_spec.get('Labels')
+
+ @requires_api_version('1.29')
+ def test_update_service_with_network_change(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ net1 = self.client.create_network(
+ self.get_service_name(), driver='overlay',
+ ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ self.get_service_name(), driver='overlay',
+ ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[net1['Id']]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert len(svc_info['Spec']['Networks']) > 0
+ assert svc_info['Spec']['Networks'][0]['Target'] == net1['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, name=name,
+ networks=[net2['Id']], fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net2['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+
+ self._update_service(
+ svc_id, name, new_index, name=name, networks=[net1['Id']],
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'ContainerSpec' in task_template
+ new_spec = task_template['ContainerSpec']
+ assert 'Image' in new_spec
+ assert new_spec['Image'].split(':')[0] == 'busybox'
+ assert 'Command' in new_spec
+ assert new_spec['Command'] == ['echo', 'hello']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net1['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, networks=[net2['Id']]
+ )
+ self._update_service(
+ svc_id, name, new_index, task_tmpl, name=name,
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net2['Id']
+
+ def _update_service(self, svc_id, *args, **kwargs):
+ # service update tests seem to be a bit flaky
+ # give them a chance to retry the update with a new version index
+ try:
+ self.client.update_service(*args, **kwargs)
+ except docker.errors.APIError as e:
+ if e.explanation.endswith("update out of sequence"):
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ if len(args) > 1:
+ args = (args[0], version_index) + args[2:]
+ else:
+ kwargs['version'] = version_index
+
+ self.client.update_service(*args, **kwargs)
+ else:
+ raise
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
index 666c689..dbf3786 100644
--- a/tests/integration/api_swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -10,9 +10,16 @@ class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
force_leave_swarm(self.client)
+ self._unlock_key = None
def tearDown(self):
super(SwarmTest, self).tearDown()
+ try:
+ if self._unlock_key:
+ self.client.unlock_swarm(self._unlock_key)
+ except docker.errors.APIError:
+ pass
+
force_leave_swarm(self.client)
@requires_api_version('1.24')
@@ -45,6 +52,48 @@ class SwarmTest(BaseAPIIntegrationTest):
assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+ @requires_api_version('1.30')
+ def test_init_swarm_with_ca_config(self):
+ spec = self.client.create_swarm_spec(
+ node_cert_expiry=7776000000000000, ca_force_rotate=6000000000000
+ )
+
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ assert swarm_info['Spec']['CAConfig']['NodeCertExpiry'] == (
+ spec['CAConfig']['NodeCertExpiry']
+ )
+ assert swarm_info['Spec']['CAConfig']['ForceRotate'] == (
+ spec['CAConfig']['ForceRotate']
+ )
+
+ @requires_api_version('1.25')
+ def test_init_swarm_with_autolock_managers(self):
+ spec = self.client.create_swarm_spec(autolock_managers=True)
+ assert self.init_swarm(swarm_spec=spec)
+ # save unlock key for tearDown
+ self._unlock_key = self.client.get_unlock_key()
+ swarm_info = self.client.inspect_swarm()
+
+ assert (
+ swarm_info['Spec']['EncryptionConfig']['AutoLockManagers'] is True
+ )
+
+ assert self._unlock_key.get('UnlockKey')
+
+ @requires_api_version('1.25')
+ @pytest.mark.xfail(
+ reason="This doesn't seem to be taken into account by the engine"
+ )
+ def test_init_swarm_with_log_driver(self):
+ spec = {'TaskDefaults': {'LogDriver': {'Name': 'syslog'}}}
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+
+ assert swarm_info['Spec']['TaskDefaults']['LogDriver']['Name'] == (
+ 'syslog'
+ )
+
@requires_api_version('1.24')
def test_leave_swarm(self):
assert self.init_swarm()
@@ -89,24 +138,6 @@ class SwarmTest(BaseAPIIntegrationTest):
)
@requires_api_version('1.24')
- def test_update_swarm_name(self):
- assert self.init_swarm()
- swarm_info_1 = self.client.inspect_swarm()
- spec = self.client.create_swarm_spec(
- node_cert_expiry=7776000000000000, name='reimuhakurei'
- )
- assert self.client.update_swarm(
- version=swarm_info_1['Version']['Index'], swarm_spec=spec
- )
- swarm_info_2 = self.client.inspect_swarm()
-
- assert (
- swarm_info_1['Version']['Index'] !=
- swarm_info_2['Version']['Index']
- )
- assert swarm_info_2['Spec']['Name'] == 'reimuhakurei'
-
- @requires_api_version('1.24')
def test_list_nodes(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
diff --git a/tests/integration/api_volume_test.py b/tests/integration/api_volume_test.py
index 5a4bb1e..8e7dd3a 100644
--- a/tests/integration/api_volume_test.py
+++ b/tests/integration/api_volume_test.py
@@ -5,16 +5,15 @@ from ..helpers import requires_api_version
from .base import BaseAPIIntegrationTest
-@requires_api_version('1.21')
class TestVolumes(BaseAPIIntegrationTest):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
result = self.client.create_volume(name)
- self.assertIn('Name', result)
- self.assertEqual(result['Name'], name)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
def test_create_volume_invalid_driver(self):
driver_name = 'invalid.driver'
@@ -27,16 +26,16 @@ class TestVolumes(BaseAPIIntegrationTest):
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.volumes()
- self.assertIn('Volumes', result)
+ assert 'Volumes' in result
volumes = result['Volumes']
- self.assertIn(volume_info, volumes)
+ assert volume_info in volumes
def test_inspect_volume(self):
name = 'embodimentofscarletdevil'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.inspect_volume(name)
- self.assertEqual(volume_info, result)
+ assert volume_info == result
def test_inspect_nonexistent_volume(self):
name = 'embodimentofscarletdevil'
diff --git a/tests/integration/base.py b/tests/integration/base.py
index 3c01689..56c23ed 100644
--- a/tests/integration/base.py
+++ b/tests/integration/base.py
@@ -4,7 +4,6 @@ import unittest
import docker
from docker.utils import kwargs_from_env
-import six
from .. import helpers
@@ -19,9 +18,6 @@ class BaseIntegrationTest(unittest.TestCase):
"""
def setUp(self):
- if six.PY2:
- self.assertRegex = self.assertRegexpMatches
- self.assertCountEqual = self.assertItemsEqual
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
@@ -29,6 +25,7 @@ class BaseIntegrationTest(unittest.TestCase):
self.tmp_networks = []
self.tmp_plugins = []
self.tmp_secrets = []
+ self.tmp_configs = []
def tearDown(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -39,7 +36,7 @@ class BaseIntegrationTest(unittest.TestCase):
pass
for container in self.tmp_containers:
try:
- client.api.remove_container(container, force=True)
+ client.api.remove_container(container, force=True, v=True)
except docker.errors.APIError:
pass
for network in self.tmp_networks:
@@ -59,6 +56,12 @@ class BaseIntegrationTest(unittest.TestCase):
except docker.errors.APIError:
pass
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
for folder in self.tmp_folders:
shutil.rmtree(folder)
@@ -71,19 +74,29 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
def setUp(self):
super(BaseAPIIntegrationTest, self).setUp()
- self.client = docker.APIClient(
- version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
- )
+ self.client = self.get_client_instance()
def tearDown(self):
super(BaseAPIIntegrationTest, self).tearDown()
self.client.close()
+ @staticmethod
+ def get_client_instance():
+ return docker.APIClient(
+ version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
+ )
+
+ @staticmethod
+ def _init_swarm(client, **kwargs):
+ return client.init_swarm(
+ '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
+ )
+
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
- exitcode = self.client.wait(container)
+ exitcode = self.client.wait(container)['StatusCode']
if exitcode != 0:
output = self.client.logs(container)
@@ -109,6 +122,4 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
assert actual_exit_code == exit_code, msg
def init_swarm(self, **kwargs):
- return self.client.init_swarm(
- 'eth0', listen_addr=helpers.swarm_listen_addr(), **kwargs
- )
+ return self._init_swarm(self.client, **kwargs)
diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py
index 8f6bd86..7df172c 100644
--- a/tests/integration/client_test.py
+++ b/tests/integration/client_test.py
@@ -1,7 +1,10 @@
+import threading
import unittest
import docker
+from datetime import datetime, timedelta
+
from ..helpers import requires_api_version
from .base import TEST_API_VERSION
@@ -27,3 +30,20 @@ class ClientTest(unittest.TestCase):
assert 'Containers' in data
assert 'Volumes' in data
assert 'Images' in data
+
+
+class CancellableEventsTest(unittest.TestCase):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ def test_cancel_events(self):
+ start = datetime.now()
+
+ events = self.client.events(until=start + timedelta(seconds=5))
+
+ cancel_thread = threading.Timer(2, events.close)
+ cancel_thread.start()
+
+ for _ in events:
+ pass
+
+ self.assertLess(datetime.now() - start, timedelta(seconds=3))
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
index dc5cef4..ac74d72 100644
--- a/tests/integration/errors_test.py
+++ b/tests/integration/errors_test.py
@@ -1,14 +1,15 @@
from docker.errors import APIError
from .base import BaseAPIIntegrationTest, BUSYBOX
+import pytest
class ErrorsTest(BaseAPIIntegrationTest):
def test_api_error_parses_json(self):
container = self.client.create_container(BUSYBOX, ['sleep', '10'])
self.client.start(container['Id'])
- with self.assertRaises(APIError) as cm:
+ with pytest.raises(APIError) as cm:
self.client.remove_container(container['Id'])
- explanation = cm.exception.explanation
+ explanation = cm.value.explanation
assert 'You cannot remove a running container' in explanation
assert '{"message":' not in explanation
self.client.remove_container(container['Id'], force=True)
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
index ce3349b..6ddb034 100644
--- a/tests/integration/models_containers_test.py
+++ b/tests/integration/models_containers_test.py
@@ -1,17 +1,19 @@
-import docker
import tempfile
+import threading
+
+import docker
+import pytest
from .base import BaseIntegrationTest, TEST_API_VERSION
-from ..helpers import random_name
+from ..helpers import random_name, requires_api_version
class ContainerCollectionTest(BaseIntegrationTest):
def test_run(self):
client = docker.from_env(version=TEST_API_VERSION)
- self.assertEqual(
- client.containers.run("alpine", "echo hello world", remove=True),
- b'hello world\n'
- )
+ assert client.containers.run(
+ "alpine", "echo hello world", remove=True
+ ) == b'hello world\n'
def test_run_detach(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -22,16 +24,16 @@ class ContainerCollectionTest(BaseIntegrationTest):
def test_run_with_error(self):
client = docker.from_env(version=TEST_API_VERSION)
- with self.assertRaises(docker.errors.ContainerError) as cm:
+ with pytest.raises(docker.errors.ContainerError) as cm:
client.containers.run("alpine", "cat /test", remove=True)
- assert cm.exception.exit_status == 1
- assert "cat /test" in str(cm.exception)
- assert "alpine" in str(cm.exception)
- assert "No such file or directory" in str(cm.exception)
+ assert cm.value.exit_status == 1
+ assert "cat /test" in cm.exconly()
+ assert "alpine" in cm.exconly()
+ assert "No such file or directory" in cm.exconly()
def test_run_with_image_that_does_not_exist(self):
client = docker.from_env(version=TEST_API_VERSION)
- with self.assertRaises(docker.errors.ImageNotFound):
+ with pytest.raises(docker.errors.ImageNotFound):
client.containers.run("dockerpytest_does_not_exist")
def test_run_with_volume(self):
@@ -46,15 +48,19 @@ class ContainerCollectionTest(BaseIntegrationTest):
self.tmp_containers.append(container.id)
container.wait()
+ name = "container_volume_test"
out = client.containers.run(
"alpine", "cat /insidecontainer/test",
- volumes=["%s:/insidecontainer" % path]
+ volumes=["%s:/insidecontainer" % path],
+ name=name
)
- self.assertEqual(out, b'hello\n')
+ self.tmp_containers.append(name)
+ assert out == b'hello\n'
def test_run_with_named_volume(self):
client = docker.from_env(version=TEST_API_VERSION)
- client.volumes.create(name="somevolume")
+ volume = client.volumes.create(name="somevolume")
+ self.tmp_volumes.append(volume.id)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
@@ -64,11 +70,14 @@ class ContainerCollectionTest(BaseIntegrationTest):
self.tmp_containers.append(container.id)
container.wait()
+ name = "container_volume_test"
out = client.containers.run(
"alpine", "cat /insidecontainer/test",
- volumes=["somevolume:/insidecontainer"]
+ volumes=["somevolume:/insidecontainer"],
+ name=name
)
- self.assertEqual(out, b'hello\n')
+ self.tmp_containers.append(name)
+ assert out == b'hello\n'
def test_run_with_network(self):
net_name = random_name()
@@ -95,7 +104,7 @@ class ContainerCollectionTest(BaseIntegrationTest):
"alpine", "echo hello",
log_config=dict(type='none')
)
- self.assertEqual(out, None)
+ assert out is None
def test_run_with_json_file_driver(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -104,7 +113,49 @@ class ContainerCollectionTest(BaseIntegrationTest):
"alpine", "echo hello",
log_config=dict(type='json-file')
)
- self.assertEqual(out, b'hello\n')
+ assert out == b'hello\n'
+
+ @requires_api_version('1.25')
+ def test_run_with_auto_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'echo hello', auto_remove=True
+ )
+ assert out == b'hello\n'
+
+ @requires_api_version('1.25')
+ def test_run_with_auto_remove_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ContainerError) as e:
+ client.containers.run(
+ 'alpine', 'sh -c ">&2 echo error && exit 1"', auto_remove=True
+ )
+ assert e.value.exit_status == 1
+ assert e.value.stderr is None
+
+ def test_run_with_streamed_logs(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'sh -c "echo hello && echo world"', stream=True
+ )
+ logs = [line for line in out]
+ assert logs[0] == b'hello\n'
+ assert logs[1] == b'world\n'
+
+ @pytest.mark.timeout(5)
+ def test_run_with_streamed_logs_and_cancel(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'sh -c "echo hello && echo world"', stream=True
+ )
+
+ threading.Timer(1, out.close).start()
+
+ logs = [line for line in out]
+
+ assert len(logs) == 2
+ assert logs[0] == b'hello\n'
+ assert logs[1] == b'world\n'
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -124,6 +175,28 @@ class ContainerCollectionTest(BaseIntegrationTest):
container = containers[0]
assert container.attrs['Config']['Image'] == 'alpine'
+ assert container.status == 'running'
+ assert container.image == client.images.get('alpine')
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+ def test_list_sparse(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list(sparse=True) if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Image'] == 'alpine'
+ assert container.status == 'running'
+ assert container.image == client.images.get('alpine')
+ with pytest.raises(docker.errors.DockerException):
+ container.labels
container.kill()
container.remove()
@@ -141,10 +214,9 @@ class ContainerTest(BaseIntegrationTest):
self.tmp_containers.append(container.id)
container.wait()
image = container.commit()
- self.assertEqual(
- client.containers.run(image.id, "cat /test", remove=True),
- b"hello\n"
- )
+ assert client.containers.run(
+ image.id, "cat /test", remove=True
+ ) == b"hello\n"
def test_diff(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -153,13 +225,24 @@ class ContainerTest(BaseIntegrationTest):
container.wait()
assert container.diff() == [{'Path': '/test', 'Kind': 1}]
- def test_exec_run(self):
+ def test_exec_run_success(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
)
self.tmp_containers.append(container.id)
- assert container.exec_run("cat /test") == b"hello\n"
+ exec_output = container.exec_run("cat /test")
+ assert exec_output[0] == 0
+ assert exec_output[1] == b"hello\n"
+
+ def test_exec_run_failed(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ exec_output = container.exec_run("docker ps")
+ assert exec_output[0] == 126
def test_kill(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -271,8 +354,8 @@ class ContainerTest(BaseIntegrationTest):
container = client.containers.run("alpine", "sh -c 'exit 0'",
detach=True)
self.tmp_containers.append(container.id)
- assert container.wait() == 0
+ assert container.wait()['StatusCode'] == 0
container = client.containers.run("alpine", "sh -c 'exit 1'",
detach=True)
self.tmp_containers.append(container.id)
- assert container.wait() == 1
+ assert container.wait()['StatusCode'] == 1
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
index 8f812d9..ae735ba 100644
--- a/tests/integration/models_images_test.py
+++ b/tests/integration/models_images_test.py
@@ -1,36 +1,39 @@
import io
+import tempfile
import docker
import pytest
-from .base import BaseIntegrationTest, TEST_API_VERSION
+from .base import BaseIntegrationTest, BUSYBOX, TEST_API_VERSION
class ImageCollectionTest(BaseIntegrationTest):
def test_build(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.build(fileobj=io.BytesIO(
+ image, _ = client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
))
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"hello world\n"
- @pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
+ # @pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
def test_build_with_error(self):
client = docker.from_env(version=TEST_API_VERSION)
- with self.assertRaises(docker.errors.BuildError) as cm:
+ with pytest.raises(docker.errors.BuildError) as cm:
client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
- "NOTADOCKERFILECOMMAND".encode('ascii')
+ "RUN exit 1".encode('ascii')
))
- assert str(cm.exception) == ("Unknown instruction: "
- "NOTADOCKERFILECOMMAND")
+ assert (
+ "The command '/bin/sh -c exit 1' returned a non-zero code: 1"
+ ) in cm.exconly()
+ assert cm.value.build_log
def test_build_with_multiple_success(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.build(
+ image, _ = client.images.build(
tag='some-tag', fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
@@ -41,7 +44,7 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_build_with_success_build_output(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.build(
+ image, _ = client.images.build(
tag='dup-txt-tag', fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo Successfully built abcd1234".encode('ascii')
@@ -71,6 +74,40 @@ class ImageCollectionTest(BaseIntegrationTest):
image = client.images.pull('alpine', tag='3.3')
assert 'alpine:3.3' in image.attrs['RepoTags']
+ def test_pull_with_sha(self):
+ image_ref = (
+ 'hello-world@sha256:083de497cff944f969d8499ab94f07134c50bcf5e6b95'
+ '59b27182d3fa80ce3f7'
+ )
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull(image_ref)
+ assert image_ref in image.attrs['RepoDigests']
+
+ def test_pull_multiple(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ images = client.images.pull('hello-world')
+ assert len(images) == 1
+ assert 'hello-world:latest' in images[0].attrs['RepoTags']
+
+ def test_load_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ImageLoadError):
+ client.images.load('abc')
+
+ def test_save_and_load(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(BUSYBOX)
+ with tempfile.TemporaryFile() as f:
+ stream = image.save()
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ result = client.images.load(f.read())
+
+ assert len(result) == 1
+ assert result[0].id == image.id
+
class ImageTest(BaseIntegrationTest):
diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py
index 105dcc5..08d7ad2 100644
--- a/tests/integration/models_networks_test.py
+++ b/tests/integration/models_networks_test.py
@@ -3,7 +3,7 @@ from .. import helpers
from .base import BaseIntegrationTest, TEST_API_VERSION
-class ImageCollectionTest(BaseIntegrationTest):
+class NetworkCollectionTest(BaseIntegrationTest):
def test_create(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -47,7 +47,7 @@ class ImageCollectionTest(BaseIntegrationTest):
assert network.id not in [n.id for n in client.networks.list()]
-class ImageTest(BaseIntegrationTest):
+class NetworkTest(BaseIntegrationTest):
def test_connect_disconnect(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -59,6 +59,12 @@ class ImageTest(BaseIntegrationTest):
network.connect(container)
container.start()
assert client.networks.get(network.id).containers == [container]
+ network_containers = list(
+ c
+ for net in client.networks.list(ids=[network.id], greedy=True)
+ for c in net.containers
+ )
+ assert network_containers == [container]
network.disconnect(container)
assert network.containers == []
assert client.networks.get(network.id).containers == []
diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py
index 5823e6b..3c8d48a 100644
--- a/tests/integration/models_nodes_test.py
+++ b/tests/integration/models_nodes_test.py
@@ -15,7 +15,7 @@ class NodesTest(unittest.TestCase):
def test_list_get_update(self):
client = docker.from_env(version=TEST_API_VERSION)
- client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
+ client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
nodes = client.nodes.list()
assert len(nodes) == 1
assert nodes[0].attrs['Spec']['Role'] == 'manager'
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
index 9b5676d..36caa85 100644
--- a/tests/integration/models_services_test.py
+++ b/tests/integration/models_services_test.py
@@ -5,6 +5,8 @@ import pytest
from .. import helpers
from .base import TEST_API_VERSION
+from docker.errors import InvalidArgument
+from docker.types.services import ServiceMode
class ServiceTest(unittest.TestCase):
@@ -12,7 +14,7 @@ class ServiceTest(unittest.TestCase):
def setUpClass(cls):
client = docker.from_env(version=TEST_API_VERSION)
helpers.force_leave_swarm(client)
- client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
+ client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
@classmethod
def tearDownClass(cls):
@@ -36,6 +38,25 @@ class ServiceTest(unittest.TestCase):
assert "alpine" in container_spec['Image']
assert container_spec['Labels'] == {'container': 'label'}
+ def test_create_with_network(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(
+ helpers.random_name(), driver='overlay'
+ )
+ service = client.services.create(
+ # create arguments
+ name=name,
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ networks=[network.id]
+ )
+ assert 'Networks' in service.attrs['Spec']['TaskTemplate']
+ networks = service.attrs['Spec']['TaskTemplate']['Networks']
+ assert len(networks) == 1
+ assert networks[0]['Target'] == network.id
+
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
@@ -82,7 +103,6 @@ class ServiceTest(unittest.TestCase):
assert len(tasks) == 1
assert tasks[0]['ServiceID'] == service2.id
- @pytest.mark.skip(reason="Makes Swarm unstable?")
def test_update(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
@@ -101,3 +121,215 @@ class ServiceTest(unittest.TestCase):
service.reload()
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert container_spec['Command'] == ["sleep", "600"]
+
+ def test_update_retains_service_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ labels={'service.label': 'SampleLabel'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ labels = service.attrs['Spec']['Labels']
+ assert labels == {'service.label': 'SampleLabel'}
+
+ def test_update_retains_container_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container.label': 'SampleLabel'}
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Labels'] == {'container.label': 'SampleLabel'}
+
+ def test_update_remove_service_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ labels={'service.label': 'SampleLabel'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ labels={},
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert not service.attrs['Spec'].get('Labels')
+
+ @pytest.mark.xfail(reason='Flaky test')
+ def test_update_retains_networks(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ network_name = helpers.random_name()
+ network = client.networks.create(
+ network_name, driver='overlay'
+ )
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ networks=[network.id],
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.reload()
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ networks = service.attrs['Spec']['TaskTemplate']['Networks']
+ assert networks == [{'Target': network.id}]
+
+ def test_scale_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ service.update(
+ mode=docker.types.ServiceMode('replicated', replicas=2),
+ )
+ while len(tasks) == 1:
+ tasks = service.tasks()
+ assert len(tasks) >= 2
+ # check that the container spec is not overridden with None
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ def test_scale_method_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ service.scale(2)
+ while len(tasks) == 1:
+ tasks = service.tasks()
+ assert len(tasks) >= 2
+ # check that the container spec is not overridden with None
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ def test_scale_method_global_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ mode = ServiceMode('global')
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300",
+ mode=mode
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ with pytest.raises(InvalidArgument):
+ service.scale(2)
+
+ assert len(tasks) == 1
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.update(
+ # create argument
+ name=service.name,
+ # task template argument
+ force_update=10,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.version > initial_version
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service_using_bool(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.update(
+ # create argument
+ name=service.name,
+ # task template argument
+ force_update=True,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.version > initial_version
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service_using_shorthand_method(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.force_update()
+ service.reload()
+ assert service.version > initial_version
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
index e45ff3c..f39f0d3 100644
--- a/tests/integration/models_swarm_test.py
+++ b/tests/integration/models_swarm_test.py
@@ -4,6 +4,7 @@ import docker
from .. import helpers
from .base import TEST_API_VERSION
+import pytest
class SwarmTest(unittest.TestCase):
@@ -16,18 +17,17 @@ class SwarmTest(unittest.TestCase):
def test_init_update_leave(self):
client = docker.from_env(version=TEST_API_VERSION)
client.swarm.init(
- advertise_addr='eth0', snapshot_interval=5000,
+ advertise_addr='127.0.0.1', snapshot_interval=5000,
listen_addr=helpers.swarm_listen_addr()
)
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
client.swarm.update(snapshot_interval=10000)
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
+ assert client.swarm.id
assert client.swarm.leave(force=True)
- with self.assertRaises(docker.errors.APIError) as cm:
+ with pytest.raises(docker.errors.APIError) as cm:
client.swarm.reload()
assert (
- # FIXME: test for both until
- # https://github.com/docker/docker/issues/29192 is resolved
- cm.exception.response.status_code == 406 or
- cm.exception.response.status_code == 503
+ cm.value.response.status_code == 406 or
+ cm.value.response.status_code == 503
)
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index e3e6d9b..0fd4e43 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -5,15 +5,16 @@ import docker
import six
from .base import BaseAPIIntegrationTest, BUSYBOX
+import pytest
class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
- with self.assertRaises(docker.errors.APIError) as exc:
+ with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
- self.assertEqual(exc.exception.response.status_code, 500)
+ assert exc.value.response.status_code == 500
dfile.close()
def test_542_truncate_ids_client_side(self):
@@ -21,10 +22,10 @@ class TestRegressions(BaseAPIIntegrationTest):
self.client.create_container(BUSYBOX, ['true'])
)
result = self.client.containers(all=True, trunc=True)
- self.assertEqual(len(result[0]['Id']), 12)
+ assert len(result[0]['Id']) == 12
def test_647_support_doubleslash_in_image_names(self):
- with self.assertRaises(docker.errors.APIError):
+ with pytest.raises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649_handle_timeout_value_none(self):
@@ -53,15 +54,12 @@ class TestRegressions(BaseAPIIntegrationTest):
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
- self.assertEqual(
- self.client.port(ctnr, 2000)[0]['HostPort'],
- six.text_type(tcp_port)
- )
- self.assertEqual(
- self.client.port(ctnr, '2000/tcp')[0]['HostPort'],
- six.text_type(tcp_port)
- )
- self.assertEqual(
- self.client.port(ctnr, '2000/udp')[0]['HostPort'],
- six.text_type(udp_port)
- )
+ assert self.client.port(
+ ctnr, 2000
+ )[0]['HostPort'] == six.text_type(tcp_port)
+ assert self.client.port(
+ ctnr, '2000/tcp'
+ )[0]['HostPort'] == six.text_type(tcp_port)
+ assert self.client.port(
+ ctnr, '2000/udp'
+ )[0]['HostPort'] == six.text_type(udp_port)
diff --git a/tests/unit/api_build_test.py b/tests/unit/api_build_test.py
index 927aa97..a7f34fd 100644
--- a/tests/unit/api_build_test.py
+++ b/tests/unit/api_build_test.py
@@ -5,6 +5,7 @@ import docker
from docker import auth
from .api_test import BaseAPIClientTest, fake_request, url_prefix
+import pytest
class BuildTest(BaseAPIClientTest):
@@ -30,17 +31,6 @@ class BuildTest(BaseAPIClientTest):
self.client.build(fileobj=script, pull=True)
- def test_build_container_stream(self):
- script = io.BytesIO('\n'.join([
- 'FROM busybox',
- 'RUN mkdir -p /tmp/test',
- 'EXPOSE 8080',
- 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
- ' /tmp/silence.tar.gz'
- ]).encode('ascii'))
-
- self.client.build(fileobj=script, stream=True)
-
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
@@ -72,10 +62,12 @@ class BuildTest(BaseAPIClientTest):
def test_build_remote_with_registry_auth(self):
self.client._auth_configs = {
- 'https://example.com': {
- 'user': 'example',
- 'password': 'example',
- 'email': 'example@example.com'
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
}
}
@@ -84,7 +76,10 @@ class BuildTest(BaseAPIClientTest):
'forcerm': False,
'remote': 'https://github.com/docker-library/mongo'}
expected_headers = {
- 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ )
+ }
self.client.build(path='https://github.com/docker-library/mongo')
@@ -110,44 +105,53 @@ class BuildTest(BaseAPIClientTest):
})
def test_build_container_invalid_container_limits(self):
- self.assertRaises(
- docker.errors.DockerException,
- lambda: self.client.build('.', container_limits={
+ with pytest.raises(docker.errors.DockerException):
+ self.client.build('.', container_limits={
'foo': 'bar'
})
- )
def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
self.client._auth_configs = {
- 'https://example.com': {
- 'user': 'example',
- 'password': 'example',
- 'email': 'example@example.com'
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
}
}
headers = {}
expected_headers = {
- 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ )
+ }
+
self.client._set_auth_headers(headers)
- self.assertEqual(headers, expected_headers)
+ assert headers == expected_headers
def test_set_auth_headers_with_dict_and_auth_configs(self):
self.client._auth_configs = {
- 'https://example.com': {
- 'user': 'example',
- 'password': 'example',
- 'email': 'example@example.com'
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
}
}
headers = {'foo': 'bar'}
expected_headers = {
- 'foo': 'bar',
- 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ ),
+ 'foo': 'bar'
+ }
self.client._set_auth_headers(headers)
- self.assertEqual(headers, expected_headers)
+ assert headers == expected_headers
def test_set_auth_headers_with_dict_and_no_auth_configs(self):
headers = {'foo': 'bar'}
@@ -156,4 +160,4 @@ class BuildTest(BaseAPIClientTest):
}
self.client._set_auth_headers(headers)
- self.assertEqual(headers, expected_headers)
+ assert headers == expected_headers
diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py
index 3b135a8..c33f129 100644
--- a/tests/unit/api_container_test.py
+++ b/tests/unit/api_container_test.py
@@ -30,31 +30,20 @@ class StartContainerTest(BaseAPIClientTest):
self.client.start(fake_api.FAKE_CONTAINER_ID)
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'containers/3cc2351ab11b/start'
- )
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/start'
assert 'data' not in args[1]
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_start_container_none(self):
with pytest.raises(ValueError) as excinfo:
self.client.start(container=None)
- self.assertEqual(
- str(excinfo.value),
- 'Resource ID was not provided',
- )
+ assert str(excinfo.value) == 'Resource ID was not provided'
with pytest.raises(ValueError) as excinfo:
self.client.start(None)
- self.assertEqual(
- str(excinfo.value),
- 'Resource ID was not provided',
- )
+ assert str(excinfo.value) == 'Resource ID was not provided'
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
@@ -134,14 +123,9 @@ class StartContainerTest(BaseAPIClientTest):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'containers/3cc2351ab11b/start'
- )
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/start'
assert 'data' not in args[1]
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
class CreateContainerTest(BaseAPIClientTest):
@@ -149,17 +133,15 @@ class CreateContainerTest(BaseAPIClientTest):
self.client.create_container('busybox', 'true')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox", "Cmd": ["true"],
- "AttachStdin": false,
- "AttachStderr": true, "AttachStdout": true,
- "StdinOnce": false,
- "OpenStdin": false, "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": false,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": false,
+ "OpenStdin": false, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_binds(self):
mount_dest = '/mnt'
@@ -168,19 +150,17 @@ class CreateContainerTest(BaseAPIClientTest):
volumes=[mount_dest])
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls", "/mnt"], "AttachStdin": false,
- "Volumes": {"/mnt": {}},
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls", "/mnt"], "AttachStdin": false,
+ "Volumes": {"/mnt": {}},
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
@@ -189,82 +169,56 @@ class CreateContainerTest(BaseAPIClientTest):
volumes=mount_dest)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls", "/mnt"], "AttachStdin": false,
- "Volumes": {"/mnt": {}},
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls", "/mnt"], "AttachStdin": false,
+ "Volumes": {"/mnt": {}},
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_ports(self):
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "ExposedPorts": {
- "1111/tcp": {},
- "2222/udp": {},
- "3333/tcp": {}
- },
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "ExposedPorts": {
+ "1111/tcp": {},
+ "2222/udp": {},
+ "3333/tcp": {}
+ },
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_entrypoint(self):
self.client.create_container('busybox', 'hello',
entrypoint='cowsay entry')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["hello"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "Entrypoint": ["cowsay", "entry"]}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_with_cpu_shares(self):
- with pytest.deprecated_call():
- self.client.create_container('busybox', 'ls', cpu_shares=5)
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["hello"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "Entrypoint": ["cowsay", "entry"]}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
- args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "CpuShares": 5}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- @requires_api_version('1.18')
def test_create_container_with_host_config_cpu_shares(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
@@ -273,45 +227,22 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
-
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "CpuShares": 512,
- "NetworkMode": "default"
- }}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_with_cpuset(self):
- with pytest.deprecated_call():
- self.client.create_container('busybox', 'ls', cpuset='0,1')
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuShares": 512,
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
- args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "Cpuset": "0,1",
- "CpusetCpus": "0,1"}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- @requires_api_version('1.18')
def test_create_container_with_host_config_cpuset(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
@@ -320,25 +251,22 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
-
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "CpusetCpus": "0,1",
- "NetworkMode": "default"
- }}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- @requires_api_version('1.19')
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetCpus": "0,1",
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
def test_create_container_with_host_config_cpuset_mems(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
@@ -347,23 +275,21 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
-
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "CpusetMems": "0",
- "NetworkMode": "default"
- }}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetMems": "0",
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
@@ -373,87 +299,58 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
data = json.loads(args[1]['data'])
- self.assertIn('HostConfig', data)
- self.assertIn('CgroupParent', data['HostConfig'])
- self.assertEqual(data['HostConfig']['CgroupParent'], 'test')
+ assert 'HostConfig' in data
+ assert 'CgroupParent' in data['HostConfig']
+ assert data['HostConfig']['CgroupParent'] == 'test'
def test_create_container_with_working_dir(self):
self.client.create_container('busybox', 'ls',
working_dir='/root')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "WorkingDir": "/root"}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "WorkingDir": "/root"}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_stdin_open(self):
self.client.create_container('busybox', 'true', stdin_open=True)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox", "Cmd": ["true"],
- "AttachStdin": true,
- "AttachStderr": true, "AttachStdout": true,
- "StdinOnce": true,
- "OpenStdin": true, "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_with_volumes_from(self):
- vol_names = ['foo', 'bar']
- try:
- self.client.create_container('busybox', 'true',
- volumes_from=vol_names)
- except docker.errors.DockerException:
- self.assertTrue(
- docker.utils.compare_version('1.10', self.client._version) >= 0
- )
- return
-
- args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
- ','.join(vol_names))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_empty_volumes_from(self):
- with pytest.raises(docker.errors.InvalidVersion):
- self.client.create_container('busybox', 'true', volumes_from=[])
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": true,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": true,
+ "OpenStdin": true, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox", "Cmd": ["true"],
- "AttachStdin": false,
- "AttachStderr": true, "AttachStdout": true,
- "StdinOnce": false,
- "OpenStdin": false, "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": false,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": false,
+ "OpenStdin": false, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['params'] == {'name': 'marisa-kirisame'}
def test_create_container_with_mem_limit_as_int(self):
self.client.create_container(
@@ -464,7 +361,7 @@ class CreateContainerTest(BaseAPIClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0)
+ assert data['HostConfig']['Memory'] == 128.0
def test_create_container_with_mem_limit_as_string(self):
self.client.create_container(
@@ -475,7 +372,7 @@ class CreateContainerTest(BaseAPIClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0)
+ assert data['HostConfig']['Memory'] == 128.0
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
self.client.create_container(
@@ -486,7 +383,7 @@ class CreateContainerTest(BaseAPIClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
+ assert data['HostConfig']['Memory'] == 128.0 * 1024
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
self.client.create_container(
@@ -497,7 +394,7 @@ class CreateContainerTest(BaseAPIClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
+ assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
self.client.create_container(
@@ -508,20 +405,14 @@ class CreateContainerTest(BaseAPIClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(
- data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
- )
+ assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024 * 1024
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
- self.assertRaises(
- docker.errors.DockerException,
- self.client.create_host_config, mem_limit='128p'
- )
+ with pytest.raises(docker.errors.DockerException):
+ self.client.create_host_config(mem_limit='128p')
- self.assertRaises(
- docker.errors.DockerException,
- self.client.create_host_config, mem_limit='1f28'
- )
+ with pytest.raises(docker.errors.DockerException):
+ self.client.create_host_config(mem_limit='1f28')
def test_create_container_with_lxc_conf(self):
self.client.create_container(
@@ -531,25 +422,16 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'containers/create'
- )
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'],
- {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_lxc_conf_compat(self):
self.client.create_container(
@@ -559,20 +441,15 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
- self.assertEqual(
- json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_ro(self):
mount_dest = '/mnt'
@@ -588,18 +465,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_rw(self):
mount_dest = '/mnt'
@@ -615,18 +487,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_mode(self):
mount_dest = '/mnt'
@@ -642,18 +509,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_mode_and_ro_error(self):
with pytest.raises(ValueError):
@@ -680,21 +542,16 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_port_binds(self):
self.maxDiff = None
@@ -713,42 +570,31 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
- self.assertTrue('1111/tcp' in port_bindings)
- self.assertTrue('2222/tcp' in port_bindings)
- self.assertTrue('3333/udp' in port_bindings)
- self.assertTrue('4444/tcp' in port_bindings)
- self.assertTrue('5555/tcp' in port_bindings)
- self.assertTrue('6666/tcp' in port_bindings)
- self.assertEqual(
- [{"HostPort": "", "HostIp": ""}],
- port_bindings['1111/tcp']
- )
- self.assertEqual(
- [{"HostPort": "2222", "HostIp": ""}],
- port_bindings['2222/tcp']
- )
- self.assertEqual(
- [{"HostPort": "3333", "HostIp": ""}],
- port_bindings['3333/udp']
- )
- self.assertEqual(
- [{"HostPort": "", "HostIp": "127.0.0.1"}],
- port_bindings['4444/tcp']
- )
- self.assertEqual(
- [{"HostPort": "5555", "HostIp": "127.0.0.1"}],
- port_bindings['5555/tcp']
- )
- self.assertEqual(len(port_bindings['6666/tcp']), 2)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert '1111/tcp' in port_bindings
+ assert '2222/tcp' in port_bindings
+ assert '3333/udp' in port_bindings
+ assert '4444/tcp' in port_bindings
+ assert '5555/tcp' in port_bindings
+ assert '6666/tcp' in port_bindings
+ assert [{"HostPort": "", "HostIp": ""}] == port_bindings['1111/tcp']
+ assert [
+ {"HostPort": "2222", "HostIp": ""}
+ ] == port_bindings['2222/tcp']
+ assert [
+ {"HostPort": "3333", "HostIp": ""}
+ ] == port_bindings['3333/udp']
+ assert [
+ {"HostPort": "", "HostIp": "127.0.0.1"}
+ ] == port_bindings['4444/tcp']
+ assert [
+ {"HostPort": "5555", "HostIp": "127.0.0.1"}
+ ] == port_bindings['5555/tcp']
+ assert len(port_bindings['6666/tcp']) == 2
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_mac_address(self):
expected = "02:42:ac:11:00:0a"
@@ -760,7 +606,7 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
data = json.loads(args[1]['data'])
assert data['MacAddress'] == expected
@@ -775,17 +621,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'containers/create'
- )
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_multiple_links(self):
link_path = 'path'
@@ -801,16 +643,14 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_links_as_list_of_tuples(self):
link_path = 'path'
@@ -823,15 +663,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_privileged(self):
self.client.create_container(
@@ -843,14 +681,10 @@ class CreateContainerTest(BaseAPIClientTest):
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_restart_policy(self):
self.client.create_container(
@@ -863,21 +697,17 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
+ assert json.loads(args[1]['data']) == expected_payload
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_added_capabilities(self):
self.client.create_container(
@@ -886,17 +716,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_dropped_capabilities(self):
self.client.create_container(
@@ -905,17 +731,13 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_devices(self):
self.client.create_container(
@@ -927,7 +749,7 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Devices'] = [
@@ -941,13 +763,9 @@ class CreateContainerTest(BaseAPIClientTest):
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_labels_dict(self):
labels_dict = {
@@ -961,14 +779,10 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Labels'] == labels_dict
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_labels_list(self):
labels_list = [
@@ -986,14 +800,10 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Labels'] == labels_dict
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_named_volume(self):
mount_dest = '/mnt'
@@ -1010,39 +820,31 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'containers/create'
- )
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_stop_signal(self):
self.client.create_container('busybox', 'ls',
stop_signal='SIGINT')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "StopSignal": "SIGINT"}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "StopSignal": "SIGINT"}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
@requires_api_version('1.22')
def test_create_container_with_aliases(self):
@@ -1059,22 +861,22 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "NetworkMode": "some-network"
- },
- "NetworkingConfig": {
- "EndpointsConfig": {
- "some-network": {"Aliases": ["foo", "bar"]}
- }
- }}'''))
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "NetworkMode": "some-network"
+ },
+ "NetworkingConfig": {
+ "EndpointsConfig": {
+ "some-network": {"Aliases": ["foo", "bar"]}
+ }
+ }}
+ ''')
@requires_api_version('1.22')
def test_create_container_with_tmpfs_list(self):
@@ -1089,21 +891,16 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
@requires_api_version('1.22')
def test_create_container_with_tmpfs_dict(self):
@@ -1118,21 +915,16 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
@requires_api_version('1.24')
def test_create_container_with_sysctl(self):
@@ -1147,19 +939,15 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Sysctls'] = {
'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_unicode_envvars(self):
envvars_dict = {
@@ -1176,8 +964,8 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Env'] == expected
@requires_api_version('1.25')
def test_create_container_with_host_config_cpus(self):
@@ -1190,26 +978,23 @@ class CreateContainerTest(BaseAPIClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
-
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "CpuCount": 1,
- "CpuPercent": 20,
- "NanoCpus": 1000,
- "NetworkMode": "default"
- }}'''))
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuCount": 1,
+ "CpuPercent": 20,
+ "NanoCpus": 1000,
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
class ContainerTest(BaseAPIClientTest):
@@ -1263,7 +1048,8 @@ class ContainerTest(BaseAPIClientTest):
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
- timeout=None
+ timeout=None,
+ params={}
)
def test_wait_with_dict_instead_of_id(self):
@@ -1272,7 +1058,8 @@ class ContainerTest(BaseAPIClientTest):
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
- timeout=None
+ timeout=None,
+ params={}
)
def test_logs(self):
@@ -1289,10 +1076,7 @@ class ContainerTest(BaseAPIClientTest):
stream=False
)
- self.assertEqual(
- logs,
- 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
- )
+ assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
@@ -1308,10 +1092,7 @@ class ContainerTest(BaseAPIClientTest):
stream=False
)
- self.assertEqual(
- logs,
- 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
- )
+ assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
def test_log_streaming(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
@@ -1424,7 +1205,7 @@ class ContainerTest(BaseAPIClientTest):
def test_log_since_with_invalid_value_raises_error(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
- with self.assertRaises(docker.errors.InvalidArgument):
+ with pytest.raises(docker.errors.InvalidArgument):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=42.42)
@@ -1437,7 +1218,7 @@ class ContainerTest(BaseAPIClientTest):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
follow=True, stream=True)
- self.assertTrue(m.called)
+ assert m.called
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
@@ -1621,9 +1402,7 @@ class ContainerTest(BaseAPIClientTest):
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_container(arg)
- self.assertEqual(
- excinfo.value.args[0], 'Resource ID was not provided'
- )
+ assert excinfo.value.args[0] == 'Resource ID was not provided'
def test_container_stats(self):
self.client.stats(fake_api.FAKE_CONTAINER_ID)
@@ -1662,13 +1441,8 @@ class ContainerTest(BaseAPIClientTest):
blkio_weight=345
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'containers/3cc2351ab11b/update'
- )
- self.assertEqual(
- json.loads(args[1]['data']),
- {'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345}
- )
- self.assertEqual(
- args[1]['headers']['Content-Type'], 'application/json'
- )
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/update'
+ assert json.loads(args[1]['data']) == {
+ 'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345
+ }
+ assert args[1]['headers']['Content-Type'] == 'application/json'
diff --git a/tests/unit/api_exec_test.py b/tests/unit/api_exec_test.py
index 41ee940..a9d2dd5 100644
--- a/tests/unit/api_exec_test.py
+++ b/tests/unit/api_exec_test.py
@@ -11,85 +11,65 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
args = fake_request.call_args
- self.assertEqual(
- 'POST',
- args[0][0], url_prefix + 'containers/{0}/exec'.format(
- fake_api.FAKE_CONTAINER_ID
- )
+ assert 'POST' == args[0][0], url_prefix + 'containers/{0}/exec'.format(
+ fake_api.FAKE_CONTAINER_ID
)
- self.assertEqual(
- json.loads(args[1]['data']), {
- 'Tty': False,
- 'AttachStdout': True,
- 'Container': fake_api.FAKE_CONTAINER_ID,
- 'Cmd': ['ls', '-1'],
- 'Privileged': False,
- 'AttachStdin': False,
- 'AttachStderr': True,
- 'User': ''
- }
- )
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'AttachStdout': True,
+ 'Container': fake_api.FAKE_CONTAINER_ID,
+ 'Cmd': ['ls', '-1'],
+ 'Privileged': False,
+ 'AttachStdin': False,
+ 'AttachStderr': True,
+ 'User': ''
+ }
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_exec_start(self):
self.client.exec_start(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'exec/{0}/start'.format(
- fake_api.FAKE_EXEC_ID
- )
+ assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
)
- self.assertEqual(
- json.loads(args[1]['data']), {
- 'Tty': False,
- 'Detach': False,
- }
- )
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'Detach': False,
+ }
- self.assertEqual(
- args[1]['headers'], {
- 'Content-Type': 'application/json',
- 'Connection': 'Upgrade',
- 'Upgrade': 'tcp'
- }
- )
+ assert args[1]['headers'] == {
+ 'Content-Type': 'application/json',
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
def test_exec_start_detached(self):
self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'exec/{0}/start'.format(
- fake_api.FAKE_EXEC_ID
- )
+ assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
)
- self.assertEqual(
- json.loads(args[1]['data']), {
- 'Tty': False,
- 'Detach': True
- }
- )
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'Detach': True
+ }
- self.assertEqual(
- args[1]['headers'], {
- 'Content-Type': 'application/json'
- }
- )
+ assert args[1]['headers'] == {
+ 'Content-Type': 'application/json'
+ }
def test_exec_inspect(self):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'exec/{0}/json'.format(
- fake_api.FAKE_EXEC_ID
- )
+ assert args[0][1] == url_prefix + 'exec/{0}/json'.format(
+ fake_api.FAKE_EXEC_ID
)
def test_exec_resize(self):
diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py
index f1e42cc..1e2315d 100644
--- a/tests/unit/api_image_test.py
+++ b/tests/unit/api_image_test.py
@@ -65,29 +65,21 @@ class ImageTest(BaseAPIClientTest):
self.client.pull('joffrey/test001')
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'images/create'
- )
- self.assertEqual(
- args[1]['params'],
- {'tag': None, 'fromImage': 'joffrey/test001'}
- )
- self.assertFalse(args[1]['stream'])
+ assert args[0][1] == url_prefix + 'images/create'
+ assert args[1]['params'] == {
+ 'tag': None, 'fromImage': 'joffrey/test001'
+ }
+ assert not args[1]['stream']
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'images/create'
- )
- self.assertEqual(
- args[1]['params'],
- {'tag': None, 'fromImage': 'joffrey/test001'}
- )
- self.assertTrue(args[1]['stream'])
+ assert args[0][1] == url_prefix + 'images/create'
+ assert args[1]['params'] == {
+ 'tag': None, 'fromImage': 'joffrey/test001'
+ }
+ assert args[1]['stream']
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
@@ -203,29 +195,7 @@ class ImageTest(BaseAPIClientTest):
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
- self.assertEqual(
- excinfo.value.args[0], 'Resource ID was not provided'
- )
-
- def test_insert_image(self):
- try:
- self.client.insert(fake_api.FAKE_IMAGE_NAME,
- fake_api.FAKE_URL, fake_api.FAKE_PATH)
- except docker.errors.DeprecatedMethod:
- self.assertTrue(
- docker.utils.compare_version('1.12', self.client._version) >= 0
- )
- return
-
- fake_request.assert_called_with(
- 'POST',
- url_prefix + 'images/test_image/insert',
- params={
- 'url': fake_api.FAKE_URL,
- 'path': fake_api.FAKE_PATH
- },
- timeout=DEFAULT_TIMEOUT_SECONDS
- )
+ assert excinfo.value.args[0] == 'Resource ID was not provided'
def test_push_image(self):
with mock.patch('docker.auth.resolve_authconfig',
diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py
index 96cdc4b..c78554d 100644
--- a/tests/unit/api_network_test.py
+++ b/tests/unit/api_network_test.py
@@ -3,7 +3,6 @@ import json
import six
from .api_test import BaseAPIClientTest, url_prefix, response
-from ..helpers import requires_api_version
from docker.types import IPAMConfig, IPAMPool
try:
@@ -13,7 +12,6 @@ except ImportError:
class NetworkTest(BaseAPIClientTest):
- @requires_api_version('1.21')
def test_list_networks(self):
networks = [
{
@@ -34,22 +32,21 @@ class NetworkTest(BaseAPIClientTest):
status_code=200, content=json.dumps(networks).encode('utf-8')))
with mock.patch('docker.api.client.APIClient.get', get):
- self.assertEqual(self.client.networks(), networks)
+ assert self.client.networks() == networks
- self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
+ assert get.call_args[0][0] == url_prefix + 'networks'
filters = json.loads(get.call_args[1]['params']['filters'])
- self.assertFalse(filters)
+ assert not filters
self.client.networks(names=['foo'])
filters = json.loads(get.call_args[1]['params']['filters'])
- self.assertEqual(filters, {'name': ['foo']})
+ assert filters == {'name': ['foo']}
self.client.networks(ids=['123'])
filters = json.loads(get.call_args[1]['params']['filters'])
- self.assertEqual(filters, {'id': ['123']})
+ assert filters == {'id': ['123']}
- @requires_api_version('1.21')
def test_create_network(self):
network_data = {
"id": 'abc12345',
@@ -61,15 +58,11 @@ class NetworkTest(BaseAPIClientTest):
with mock.patch('docker.api.client.APIClient.post', post):
result = self.client.create_network('foo')
- self.assertEqual(result, network_data)
+ assert result == network_data
- self.assertEqual(
- post.call_args[0][0],
- url_prefix + 'networks/create')
+ assert post.call_args[0][0] == url_prefix + 'networks/create'
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {"Name": "foo"})
+ assert json.loads(post.call_args[1]['data']) == {"Name": "foo"}
opts = {
'com.docker.network.bridge.enable_icc': False,
@@ -77,9 +70,9 @@ class NetworkTest(BaseAPIClientTest):
}
self.client.create_network('foo', 'bridge', opts)
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {"Name": "foo", "Driver": "bridge", "Options": opts})
+ assert json.loads(post.call_args[1]['data']) == {
+ "Name": "foo", "Driver": "bridge", "Options": opts
+ }
ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
gateway="192.168.52.254")
@@ -88,23 +81,20 @@ class NetworkTest(BaseAPIClientTest):
self.client.create_network("bar", driver="bridge",
ipam=ipam_config)
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {
- "Name": "bar",
- "Driver": "bridge",
- "IPAM": {
- "Driver": "default",
- "Config": [{
- "IPRange": None,
- "Gateway": "192.168.52.254",
- "Subnet": "192.168.52.0/24",
- "AuxiliaryAddresses": None,
- }],
- }
- })
-
- @requires_api_version('1.21')
+ assert json.loads(post.call_args[1]['data']) == {
+ "Name": "bar",
+ "Driver": "bridge",
+ "IPAM": {
+ "Driver": "default",
+ "Config": [{
+ "IPRange": None,
+ "Gateway": "192.168.52.254",
+ "Subnet": "192.168.52.0/24",
+ "AuxiliaryAddresses": None,
+ }],
+ }
+ }
+
def test_remove_network(self):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
@@ -113,10 +103,8 @@ class NetworkTest(BaseAPIClientTest):
self.client.remove_network(network_id)
args = delete.call_args
- self.assertEqual(args[0][0],
- url_prefix + 'networks/{0}'.format(network_id))
+ assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
- @requires_api_version('1.21')
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
@@ -132,13 +120,11 @@ class NetworkTest(BaseAPIClientTest):
with mock.patch('docker.api.client.APIClient.get', get):
result = self.client.inspect_network(network_id)
- self.assertEqual(result, network_data)
+ assert result == network_data
args = get.call_args
- self.assertEqual(args[0][0],
- url_prefix + 'networks/{0}'.format(network_id))
+ assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
- @requires_api_version('1.21')
def test_connect_container_to_network(self):
network_id = 'abc12345'
container_id = 'def45678'
@@ -153,21 +139,18 @@ class NetworkTest(BaseAPIClientTest):
links=[('baz', 'quux')]
)
- self.assertEqual(
- post.call_args[0][0],
- url_prefix + 'networks/{0}/connect'.format(network_id))
+ assert post.call_args[0][0] == (
+ url_prefix + 'networks/{0}/connect'.format(network_id)
+ )
+
+ assert json.loads(post.call_args[1]['data']) == {
+ 'Container': container_id,
+ 'EndpointConfig': {
+ 'Aliases': ['foo', 'bar'],
+ 'Links': ['baz:quux'],
+ },
+ }
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {
- 'Container': container_id,
- 'EndpointConfig': {
- 'Aliases': ['foo', 'bar'],
- 'Links': ['baz:quux'],
- },
- })
-
- @requires_api_version('1.21')
def test_disconnect_container_from_network(self):
network_id = 'abc12345'
container_id = 'def45678'
@@ -178,10 +161,9 @@ class NetworkTest(BaseAPIClientTest):
self.client.disconnect_container_from_network(
container={'Id': container_id}, net_id=network_id)
- self.assertEqual(
- post.call_args[0][0],
- url_prefix + 'networks/{0}/disconnect'.format(network_id))
-
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {'Container': container_id})
+ assert post.call_args[0][0] == (
+ url_prefix + 'networks/{0}/disconnect'.format(network_id)
+ )
+ assert json.loads(post.call_args[1]['data']) == {
+ 'Container': container_id
+ }
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index 6ac92c4..46cbd68 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -128,34 +128,27 @@ class DockerApiTest(BaseAPIClientTest):
with pytest.raises(docker.errors.DockerException) as excinfo:
APIClient(version=1.12)
- self.assertEqual(
- str(excinfo.value),
- 'Version parameter must be a string or None. Found float'
- )
+ assert str(
+ excinfo.value
+ ) == 'Version parameter must be a string or None. Found float'
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
- self.assertEqual(
- url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
- )
+ assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
- self.assertEqual(
- url,
- '{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
+ assert url == '{0}{1}'.format(
+ url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
- self.assertEqual(
- url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
- )
+ assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
- self.assertEqual(
- url,
- '{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
+ assert url == '{0}{1}'.format(
+ url_prefix, 'images/localhost:5000/image/push'
)
def test_url_invalid_resource(self):
@@ -164,15 +157,13 @@ class DockerApiTest(BaseAPIClientTest):
def test_url_no_resource(self):
url = self.client._url('/simple')
- self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
+ assert url == '{0}{1}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
- self.assertEqual(
- url, '{0}{1}'.format(url_base, 'hello/somename/world')
- )
+ assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
@@ -194,13 +185,13 @@ class DockerApiTest(BaseAPIClientTest):
def test_retrieve_server_version(self):
client = APIClient(version="auto")
- self.assertTrue(isinstance(client._version, six.string_types))
- self.assertFalse(client._version == "auto")
+ assert isinstance(client._version, six.string_types)
+ assert not (client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
- self.assertTrue(isinstance(version, six.string_types))
+ assert isinstance(version, six.string_types)
def test_info(self):
self.client.info()
@@ -221,6 +212,24 @@ class DockerApiTest(BaseAPIClientTest):
timeout=DEFAULT_TIMEOUT_SECONDS
)
+ def test_login(self):
+ self.client.login('sakuya', 'izayoi')
+ args = fake_request.call_args
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'auth'
+ assert json.loads(args[1]['data']) == {
+ 'username': 'sakuya', 'password': 'izayoi'
+ }
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert self.client._auth_configs['auths'] == {
+ 'docker.io': {
+ 'email': None,
+ 'password': 'izayoi',
+ 'username': 'sakuya',
+ 'serveraddress': None,
+ }
+ }
+
def test_events(self):
self.client.events()
@@ -313,11 +322,10 @@ class DockerApiTest(BaseAPIClientTest):
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
- self.assertIn('SecurityOpt', result)
- self.assertEqual(result['SecurityOpt'], security_opt)
- self.assertRaises(
- TypeError, self.client.create_host_config, security_opt='wrong'
- )
+ assert 'SecurityOpt' in result
+ assert result['SecurityOpt'] == security_opt
+ with pytest.raises(TypeError):
+ self.client.create_host_config(security_opt='wrong')
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
@@ -335,26 +343,26 @@ class DockerApiTest(BaseAPIClientTest):
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
- self.assertEqual(result, content_str)
+ assert result == content_str
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
- self.assertEqual(result, content)
+ assert result == content
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
- self.assertEqual(result, content_str.decode('utf-8'))
+ assert result == content_str.decode('utf-8')
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
- self.assertEqual(result, content)
+ assert result == content
class StreamTest(unittest.TestCase):
@@ -442,21 +450,19 @@ class StreamTest(unittest.TestCase):
b'\r\n'
) + b'\r\n'.join(lines)
- with APIClient(base_url="http+unix://" + self.socket_file) \
- as client:
+ with APIClient(base_url="http+unix://" + self.socket_file) as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
- stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
- self.assertEqual(list(stream), [
- str(i).encode() for i in range(50)])
+ assert list(stream) == [
+ str(i).encode() for i in range(50)]
class UserAgentTest(unittest.TestCase):
@@ -475,18 +481,18 @@ class UserAgentTest(unittest.TestCase):
client = APIClient()
client.version()
- self.assertEqual(self.mock_send.call_count, 1)
+ assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-sdk-python/%s' % docker.__version__
- self.assertEqual(headers['User-Agent'], expected)
+ assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
client = APIClient(user_agent='foo/bar')
client.version()
- self.assertEqual(self.mock_send.call_count, 1)
+ assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
- self.assertEqual(headers['User-Agent'], 'foo/bar')
+ assert headers['User-Agent'] == 'foo/bar'
class DisableSocketTest(unittest.TestCase):
@@ -509,7 +515,7 @@ class DisableSocketTest(unittest.TestCase):
self.client._disable_socket_timeout(socket)
- self.assertEqual(socket.timeout, None)
+ assert socket.timeout is None
def test_disable_socket_timeout2(self):
"""Test that the timeouts are disabled on a generic socket object
@@ -519,8 +525,8 @@ class DisableSocketTest(unittest.TestCase):
self.client._disable_socket_timeout(socket)
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, None)
+ assert socket.timeout is None
+ assert socket._sock.timeout is None
def test_disable_socket_timout_non_blocking(self):
"""Test that a non-blocking socket does not get set to blocking."""
@@ -529,5 +535,5 @@ class DisableSocketTest(unittest.TestCase):
self.client._disable_socket_timeout(socket)
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, 0.0)
+ assert socket.timeout is None
+ assert socket._sock.timeout == 0.0
diff --git a/tests/unit/api_volume_test.py b/tests/unit/api_volume_test.py
index fc2a556..7850c22 100644
--- a/tests/unit/api_volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -7,17 +7,15 @@ from .api_test import BaseAPIClientTest, url_prefix, fake_request
class VolumeTest(BaseAPIClientTest):
- @requires_api_version('1.21')
def test_list_volumes(self):
volumes = self.client.volumes()
- self.assertIn('Volumes', volumes)
- self.assertEqual(len(volumes['Volumes']), 2)
+ assert 'Volumes' in volumes
+ assert len(volumes['Volumes']) == 2
args = fake_request.call_args
- self.assertEqual(args[0][0], 'GET')
- self.assertEqual(args[0][1], url_prefix + 'volumes')
+ assert args[0][0] == 'GET'
+ assert args[0][1] == url_prefix + 'volumes'
- @requires_api_version('1.21')
def test_list_volumes_and_filters(self):
volumes = self.client.volumes(filters={'dangling': True})
assert 'Volumes' in volumes
@@ -29,29 +27,28 @@ class VolumeTest(BaseAPIClientTest):
assert args[1] == {'params': {'filters': '{"dangling": ["true"]}'},
'timeout': 60}
- @requires_api_version('1.21')
def test_create_volume(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name)
- self.assertIn('Name', result)
- self.assertEqual(result['Name'], name)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
args = fake_request.call_args
- self.assertEqual(args[0][0], 'POST')
- self.assertEqual(args[0][1], url_prefix + 'volumes/create')
- self.assertEqual(json.loads(args[1]['data']), {'Name': name})
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'volumes/create'
+ assert json.loads(args[1]['data']) == {'Name': name}
@requires_api_version('1.23')
def test_create_volume_with_labels(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name, labels={
- 'com.example.some-label': 'some-value'})
- self.assertEqual(
- result["Labels"],
- {'com.example.some-label': 'some-value'}
- )
+ 'com.example.some-label': 'some-value'
+ })
+ assert result["Labels"] == {
+ 'com.example.some-label': 'some-value'
+ }
@requires_api_version('1.23')
def test_create_volume_with_invalid_labels(self):
@@ -59,20 +56,18 @@ class VolumeTest(BaseAPIClientTest):
with pytest.raises(TypeError):
self.client.create_volume(name, labels=1)
- @requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'
driver_name = 'sshfs'
self.client.create_volume(name, driver=driver_name)
args = fake_request.call_args
- self.assertEqual(args[0][0], 'POST')
- self.assertEqual(args[0][1], url_prefix + 'volumes/create')
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'volumes/create'
data = json.loads(args[1]['data'])
- self.assertIn('Driver', data)
- self.assertEqual(data['Driver'], driver_name)
+ assert 'Driver' in data
+ assert data['Driver'] == driver_name
- @requires_api_version('1.21')
def test_create_volume_invalid_opts_type(self):
with pytest.raises(TypeError):
self.client.create_volume(
@@ -92,31 +87,29 @@ class VolumeTest(BaseAPIClientTest):
@requires_api_version('1.24')
def test_create_volume_with_no_specified_name(self):
result = self.client.create_volume(name=None)
- self.assertIn('Name', result)
- self.assertNotEqual(result['Name'], None)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
- self.assertIn('Scope', result)
- self.assertEqual(result['Scope'], 'local')
-
- @requires_api_version('1.21')
+ assert 'Name' in result
+ assert result['Name'] is not None
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
+ assert 'Scope' in result
+ assert result['Scope'] == 'local'
+
def test_inspect_volume(self):
name = 'perfectcherryblossom'
result = self.client.inspect_volume(name)
- self.assertIn('Name', result)
- self.assertEqual(result['Name'], name)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
args = fake_request.call_args
- self.assertEqual(args[0][0], 'GET')
- self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
+ assert args[0][0] == 'GET'
+ assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
- @requires_api_version('1.21')
def test_remove_volume(self):
name = 'perfectcherryblossom'
self.client.remove_volume(name)
args = fake_request.call_args
- self.assertEqual(args[0][0], 'DELETE')
- self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
+ assert args[0][0] == 'DELETE'
+ assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 56fd50c..ee32ca0 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -9,10 +9,8 @@ import shutil
import tempfile
import unittest
-from py.test import ensuretemp
-from pytest import mark
-
from docker import auth, errors
+import pytest
try:
from unittest import mock
@@ -33,82 +31,68 @@ class RegressionTest(unittest.TestCase):
class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('image'),
- ('docker.io', 'image'),
+ assert auth.resolve_repository_name('image') == (
+ 'docker.io', 'image'
)
def test_resolve_repository_name_dotted_hub_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('image.valid'),
- ('docker.io', 'image.valid')
+ assert auth.resolve_repository_name('image.valid') == (
+ 'docker.io', 'image.valid'
)
def test_resolve_repository_name_hub_image(self):
- self.assertEqual(
- auth.resolve_repository_name('username/image'),
- ('docker.io', 'username/image'),
+ assert auth.resolve_repository_name('username/image') == (
+ 'docker.io', 'username/image'
)
def test_explicit_hub_index_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('docker.io/image'),
- ('docker.io', 'image')
+ assert auth.resolve_repository_name('docker.io/image') == (
+ 'docker.io', 'image'
)
def test_explicit_legacy_hub_index_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('index.docker.io/image'),
- ('docker.io', 'image')
+ assert auth.resolve_repository_name('index.docker.io/image') == (
+ 'docker.io', 'image'
)
def test_resolve_repository_name_private_registry(self):
- self.assertEqual(
- auth.resolve_repository_name('my.registry.net/image'),
- ('my.registry.net', 'image'),
+ assert auth.resolve_repository_name('my.registry.net/image') == (
+ 'my.registry.net', 'image'
)
def test_resolve_repository_name_private_registry_with_port(self):
- self.assertEqual(
- auth.resolve_repository_name('my.registry.net:5000/image'),
- ('my.registry.net:5000', 'image'),
+ assert auth.resolve_repository_name('my.registry.net:5000/image') == (
+ 'my.registry.net:5000', 'image'
)
def test_resolve_repository_name_private_registry_with_username(self):
- self.assertEqual(
- auth.resolve_repository_name('my.registry.net/username/image'),
- ('my.registry.net', 'username/image'),
- )
+ assert auth.resolve_repository_name(
+ 'my.registry.net/username/image'
+ ) == ('my.registry.net', 'username/image')
def test_resolve_repository_name_no_dots_but_port(self):
- self.assertEqual(
- auth.resolve_repository_name('hostname:5000/image'),
- ('hostname:5000', 'image'),
+ assert auth.resolve_repository_name('hostname:5000/image') == (
+ 'hostname:5000', 'image'
)
def test_resolve_repository_name_no_dots_but_port_and_username(self):
- self.assertEqual(
- auth.resolve_repository_name('hostname:5000/username/image'),
- ('hostname:5000', 'username/image'),
- )
+ assert auth.resolve_repository_name(
+ 'hostname:5000/username/image'
+ ) == ('hostname:5000', 'username/image')
def test_resolve_repository_name_localhost(self):
- self.assertEqual(
- auth.resolve_repository_name('localhost/image'),
- ('localhost', 'image'),
+ assert auth.resolve_repository_name('localhost/image') == (
+ 'localhost', 'image'
)
def test_resolve_repository_name_localhost_with_username(self):
- self.assertEqual(
- auth.resolve_repository_name('localhost/username/image'),
- ('localhost', 'username/image'),
+ assert auth.resolve_repository_name('localhost/username/image') == (
+ 'localhost', 'username/image'
)
def test_invalid_index_name(self):
- self.assertRaises(
- errors.InvalidRepository,
- lambda: auth.resolve_repository_name('-gecko.com/image')
- )
+ with pytest.raises(errors.InvalidRepository):
+ auth.resolve_repository_name('-gecko.com/image')
def encode_auth(auth_info):
@@ -122,154 +106,122 @@ class ResolveAuthTest(unittest.TestCase):
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
- auth_config = auth.parse_auth({
- 'https://index.docker.io/v1/': index_config,
- 'my.registry.net': private_config,
- 'http://legacy.registry.url/v1/': legacy_config,
- })
+ auth_config = {
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': index_config,
+ 'my.registry.net': private_config,
+ 'http://legacy.registry.url/v1/': legacy_config,
+ })
+ }
def test_resolve_authconfig_hostname_only(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'my.registry.net'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_protocol(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'my.registry.net/v1/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net/v1/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://my.registry.net'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path_trailing_slash(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://my.registry.net/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path_wrong_secure_proto(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'https://my.registry.net'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://index.docker.io'
- )['username'],
- 'indexuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://index.docker.io'
+ )['username'] == 'indexuser'
def test_resolve_authconfig_path_wrong_proto(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'https://my.registry.net/v1/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net/v1/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_default_registry(self):
- self.assertEqual(
- auth.resolve_authconfig(self.auth_config)['username'],
- 'indexuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config
+ )['username'] == 'indexuser'
def test_resolve_authconfig_default_explicit_none(self):
- self.assertEqual(
- auth.resolve_authconfig(self.auth_config, None)['username'],
- 'indexuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, None
+ )['username'] == 'indexuser'
def test_resolve_authconfig_fully_explicit(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://my.registry.net/v1/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/v1/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_legacy_config(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'legacy.registry.url'
- )['username'],
- 'legacyauth'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'legacy.registry.url'
+ )['username'] == 'legacyauth'
def test_resolve_authconfig_no_match(self):
- self.assertTrue(
- auth.resolve_authconfig(self.auth_config, 'does.not.exist') is None
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'does.not.exist'
+ ) is None
def test_resolve_registry_and_auth_library_image(self):
image = 'image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_hub_image(self):
image = 'username/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_explicit_hub(self):
image = 'docker.io/username/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_explicit_legacy_hub(self):
image = 'index.docker.io/username/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_private_registry(self):
image = 'my.registry.net/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'privateuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'privateuser'
def test_resolve_registry_and_auth_unauthenticated_registry(self):
image = 'other.registry.net/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- ),
- None,
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ ) is None
+
+ def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
+ auth_config = {
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': self.index_config,
+ }),
+ 'credsStore': 'blackbox'
+ }
+ with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+ m.return_value = None
+ assert 'indexuser' == auth.resolve_authconfig(
+ auth_config, None
+ )['username']
class CredStoreTest(unittest.TestCase):
@@ -323,62 +275,12 @@ class CredStoreTest(unittest.TestCase):
) == 'truesecret'
-class FindConfigFileTest(unittest.TestCase):
- def tmpdir(self, name):
- tmpdir = ensuretemp(name)
- self.addCleanup(tmpdir.remove)
- return tmpdir
-
- def test_find_config_fallback(self):
- tmpdir = self.tmpdir('test_find_config_fallback')
-
- with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
- assert auth.find_config_file() is None
-
- def test_find_config_from_explicit_path(self):
- tmpdir = self.tmpdir('test_find_config_from_explicit_path')
- config_path = tmpdir.ensure('my-config-file.json')
-
- assert auth.find_config_file(str(config_path)) == str(config_path)
-
- def test_find_config_from_environment(self):
- tmpdir = self.tmpdir('test_find_config_from_environment')
- config_path = tmpdir.ensure('config.json')
-
- with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
- assert auth.find_config_file() == str(config_path)
-
- @mark.skipif("sys.platform == 'win32'")
- def test_find_config_from_home_posix(self):
- tmpdir = self.tmpdir('test_find_config_from_home_posix')
- config_path = tmpdir.ensure('.docker', 'config.json')
-
- with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
- assert auth.find_config_file() == str(config_path)
-
- @mark.skipif("sys.platform == 'win32'")
- def test_find_config_from_home_legacy_name(self):
- tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
- config_path = tmpdir.ensure('.dockercfg')
-
- with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
- assert auth.find_config_file() == str(config_path)
-
- @mark.skipif("sys.platform != 'win32'")
- def test_find_config_from_home_windows(self):
- tmpdir = self.tmpdir('test_find_config_from_home_windows')
- config_path = tmpdir.ensure('.docker', 'config.json')
-
- with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
- assert auth.find_config_file() == str(config_path)
-
-
class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = auth.load_config(folder)
- self.assertTrue(cfg is not None)
+ assert cfg is not None
def test_load_config(self):
folder = tempfile.mkdtemp()
@@ -390,12 +292,12 @@ class LoadConfigTest(unittest.TestCase):
f.write('email = sakuya@scarlet.net')
cfg = auth.load_config(dockercfg_path)
assert auth.INDEX_NAME in cfg
- self.assertNotEqual(cfg[auth.INDEX_NAME], None)
+ assert cfg[auth.INDEX_NAME] is not None
cfg = cfg[auth.INDEX_NAME]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
@@ -418,12 +320,12 @@ class LoadConfigTest(unittest.TestCase):
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
+ assert cfg[registry] is not None
cfg = cfg[registry]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_custom_config_env(self):
folder = tempfile.mkdtemp()
@@ -445,12 +347,12 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
+ assert cfg[registry] is not None
cfg = cfg[registry]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_custom_config_env_with_auths(self):
folder = tempfile.mkdtemp()
@@ -473,13 +375,12 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
- cfg = cfg[registry]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_custom_config_env_utf8(self):
folder = tempfile.mkdtemp()
@@ -503,37 +404,12 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
- cfg = cfg[registry]
- self.assertEqual(cfg['username'], b'sakuya\xc3\xa6'.decode('utf8'))
- self.assertEqual(cfg['password'], b'izayoi\xc3\xa6'.decode('utf8'))
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
-
- def test_load_config_custom_config_env_with_headers(self):
- folder = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, folder)
-
- dockercfg_path = os.path.join(folder, 'config.json')
- config = {
- 'HttpHeaders': {
- 'Name': 'Spike',
- 'Surname': 'Spiegel'
- },
- }
-
- with open(dockercfg_path, 'w') as f:
- json.dump(config, f)
-
- with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
- cfg = auth.load_config(None)
- assert 'HttpHeaders' in cfg
- self.assertNotEqual(cfg['HttpHeaders'], None)
- cfg = cfg['HttpHeaders']
-
- self.assertEqual(cfg['Name'], 'Spike')
- self.assertEqual(cfg['Surname'], 'Spiegel')
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
+ assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_unknown_keys(self):
folder = tempfile.mkdtemp()
@@ -561,7 +437,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {'scarlet.net': {}}
+ assert cfg == {'auths': {'scarlet.net': {}}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
@@ -582,7 +458,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert registry in cfg
- cfg = cfg[registry]
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index c4996f1..cce99c5 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -8,6 +8,7 @@ import os
import unittest
from . import fake_api
+import pytest
try:
from unittest import mock
@@ -51,25 +52,25 @@ class ClientTest(unittest.TestCase):
def test_call_api_client_method(self):
client = docker.from_env()
- with self.assertRaises(AttributeError) as cm:
+ with pytest.raises(AttributeError) as cm:
client.create_container()
- s = str(cm.exception)
+ s = cm.exconly()
assert "'DockerClient' object has no attribute 'create_container'" in s
assert "this method is now on the object APIClient" in s
- with self.assertRaises(AttributeError) as cm:
+ with pytest.raises(AttributeError) as cm:
client.abcdef()
- s = str(cm.exception)
+ s = cm.exconly()
assert "'DockerClient' object has no attribute 'abcdef'" in s
assert "this method is now on the object APIClient" not in s
def test_call_containers(self):
client = docker.DockerClient(**kwargs_from_env())
- with self.assertRaises(TypeError) as cm:
+ with pytest.raises(TypeError) as cm:
client.containers()
- s = str(cm.exception)
+ s = cm.exconly()
assert "'ContainerCollection' object is not callable" in s
assert "docker.APIClient" in s
@@ -90,22 +91,22 @@ class FromEnvTest(unittest.TestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
client = docker.from_env()
- self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
+ assert client.api.base_url == "https://192.168.59.103:2376"
def test_from_env_with_version(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
client = docker.from_env(version='2.32')
- self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
- self.assertEqual(client.api._version, '2.32')
+ assert client.api.base_url == "https://192.168.59.103:2376"
+ assert client.api._version == '2.32'
def test_from_env_without_version_uses_default(self):
client = docker.from_env()
- self.assertEqual(client.api._version, DEFAULT_DOCKER_API_VERSION)
+ assert client.api._version == DEFAULT_DOCKER_API_VERSION
def test_from_env_without_timeout_uses_default(self):
client = docker.from_env()
- self.assertEqual(client.api.timeout, DEFAULT_TIMEOUT_SECONDS)
+ assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
index 8dbb35e..2be0578 100644
--- a/tests/unit/dockertypes_test.py
+++ b/tests/unit/dockertypes_test.py
@@ -1,16 +1,16 @@
# -*- coding: utf-8 -*-
import unittest
-import warnings
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import InvalidArgument, InvalidVersion
from docker.types import (
- ContainerConfig, ContainerSpec, EndpointConfig, HostConfig, IPAMConfig,
+ ContainerSpec, EndpointConfig, HostConfig, IPAMConfig,
IPAMPool, LogConfig, Mount, ServiceMode, Ulimit,
)
+from docker.types.services import convert_service_ports
try:
from unittest import mock
@@ -23,88 +23,75 @@ def create_host_config(*args, **kwargs):
class HostConfigTest(unittest.TestCase):
- def test_create_host_config_no_options(self):
- config = create_host_config(version='1.19')
- self.assertFalse('NetworkMode' in config)
-
def test_create_host_config_no_options_newer_api_version(self):
- config = create_host_config(version='1.20')
- self.assertEqual(config['NetworkMode'], 'default')
+ config = create_host_config(version='1.21')
+ assert config['NetworkMode'] == 'default'
def test_create_host_config_invalid_cpu_cfs_types(self):
with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota='0')
+ create_host_config(version='1.21', cpu_quota='0')
with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period='0')
+ create_host_config(version='1.21', cpu_period='0')
with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota=23.11)
+ create_host_config(version='1.21', cpu_quota=23.11)
with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period=1999.0)
+ create_host_config(version='1.21', cpu_period=1999.0)
def test_create_host_config_with_cpu_quota(self):
- config = create_host_config(version='1.20', cpu_quota=1999)
- self.assertEqual(config.get('CpuQuota'), 1999)
+ config = create_host_config(version='1.21', cpu_quota=1999)
+ assert config.get('CpuQuota') == 1999
def test_create_host_config_with_cpu_period(self):
- config = create_host_config(version='1.20', cpu_period=1999)
- self.assertEqual(config.get('CpuPeriod'), 1999)
+ config = create_host_config(version='1.21', cpu_period=1999)
+ assert config.get('CpuPeriod') == 1999
def test_create_host_config_with_blkio_constraints(self):
blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
- config = create_host_config(version='1.22',
- blkio_weight=1999,
- blkio_weight_device=blkio_rate,
- device_read_bps=blkio_rate,
- device_write_bps=blkio_rate,
- device_read_iops=blkio_rate,
- device_write_iops=blkio_rate)
-
- self.assertEqual(config.get('BlkioWeight'), 1999)
- self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
- self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
- self.assertEqual(blkio_rate[0]['Rate'], 1000)
+ config = create_host_config(
+ version='1.22', blkio_weight=1999, blkio_weight_device=blkio_rate,
+ device_read_bps=blkio_rate, device_write_bps=blkio_rate,
+ device_read_iops=blkio_rate, device_write_iops=blkio_rate
+ )
+
+ assert config.get('BlkioWeight') == 1999
+ assert config.get('BlkioWeightDevice') is blkio_rate
+ assert config.get('BlkioDeviceReadBps') is blkio_rate
+ assert config.get('BlkioDeviceWriteBps') is blkio_rate
+ assert config.get('BlkioDeviceReadIOps') is blkio_rate
+ assert config.get('BlkioDeviceWriteIOps') is blkio_rate
+ assert blkio_rate[0]['Path'] == "/dev/sda"
+ assert blkio_rate[0]['Rate'] == 1000
def test_create_host_config_with_shm_size(self):
config = create_host_config(version='1.22', shm_size=67108864)
- self.assertEqual(config.get('ShmSize'), 67108864)
+ assert config.get('ShmSize') == 67108864
def test_create_host_config_with_shm_size_in_mb(self):
config = create_host_config(version='1.22', shm_size='64M')
- self.assertEqual(config.get('ShmSize'), 67108864)
+ assert config.get('ShmSize') == 67108864
def test_create_host_config_with_oom_kill_disable(self):
- config = create_host_config(version='1.20', oom_kill_disable=True)
- self.assertEqual(config.get('OomKillDisable'), True)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.18.3',
- oom_kill_disable=True))
+ config = create_host_config(version='1.21', oom_kill_disable=True)
+ assert config.get('OomKillDisable') is True
def test_create_host_config_with_userns_mode(self):
config = create_host_config(version='1.23', userns_mode='host')
- self.assertEqual(config.get('UsernsMode'), 'host')
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.22',
- userns_mode='host'))
- self.assertRaises(
- ValueError, lambda: create_host_config(version='1.23',
- userns_mode='host12'))
+ assert config.get('UsernsMode') == 'host'
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', userns_mode='host')
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', userns_mode='host12')
def test_create_host_config_with_oom_score_adj(self):
config = create_host_config(version='1.22', oom_score_adj=100)
- self.assertEqual(config.get('OomScoreAdj'), 100)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.21',
- oom_score_adj=100))
- self.assertRaises(
- TypeError, lambda: create_host_config(version='1.22',
- oom_score_adj='100'))
+ assert config.get('OomScoreAdj') == 100
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.21', oom_score_adj=100)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.22', oom_score_adj='100')
def test_create_host_config_with_dns_opt(self):
@@ -112,30 +99,20 @@ class HostConfigTest(unittest.TestCase):
config = create_host_config(version='1.21', dns_opt=tested_opts)
dns_opts = config.get('DnsOptions')
- self.assertTrue('use-vc' in dns_opts)
- self.assertTrue('no-tld-query' in dns_opts)
-
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.20',
- dns_opt=tested_opts))
+ assert 'use-vc' in dns_opts
+ assert 'no-tld-query' in dns_opts
def test_create_host_config_with_mem_reservation(self):
config = create_host_config(version='1.21', mem_reservation=67108864)
- self.assertEqual(config.get('MemoryReservation'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', mem_reservation=67108864))
+ assert config.get('MemoryReservation') == 67108864
def test_create_host_config_with_kernel_memory(self):
config = create_host_config(version='1.21', kernel_memory=67108864)
- self.assertEqual(config.get('KernelMemory'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', kernel_memory=67108864))
+ assert config.get('KernelMemory') == 67108864
def test_create_host_config_with_pids_limit(self):
config = create_host_config(version='1.23', pids_limit=1024)
- self.assertEqual(config.get('PidsLimit'), 1024)
+ assert config.get('PidsLimit') == 1024
with pytest.raises(InvalidVersion):
create_host_config(version='1.22', pids_limit=1024)
@@ -144,7 +121,7 @@ class HostConfigTest(unittest.TestCase):
def test_create_host_config_with_isolation(self):
config = create_host_config(version='1.24', isolation='hyperv')
- self.assertEqual(config.get('Isolation'), 'hyperv')
+ assert config.get('Isolation') == 'hyperv'
with pytest.raises(InvalidVersion):
create_host_config(version='1.23', isolation='hyperv')
@@ -167,9 +144,6 @@ class HostConfigTest(unittest.TestCase):
create_host_config(version='1.24', mem_swappiness='40')
def test_create_host_config_with_volume_driver(self):
- with pytest.raises(InvalidVersion):
- create_host_config(version='1.20', volume_driver='local')
-
config = create_host_config(version='1.21', volume_driver='local')
assert config.get('VolumeDriver') == 'local'
@@ -179,10 +153,9 @@ class HostConfigTest(unittest.TestCase):
def test_create_host_config_with_cpu_count(self):
config = create_host_config(version='1.25', cpu_count=2)
- self.assertEqual(config.get('CpuCount'), 2)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.24', cpu_count=1))
+ assert config.get('CpuCount') == 2
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_count=1)
def test_create_host_config_invalid_cpu_percent_types(self):
with pytest.raises(TypeError):
@@ -190,10 +163,9 @@ class HostConfigTest(unittest.TestCase):
def test_create_host_config_with_cpu_percent(self):
config = create_host_config(version='1.25', cpu_percent=15)
- self.assertEqual(config.get('CpuPercent'), 15)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.24', cpu_percent=10))
+ assert config.get('CpuPercent') == 15
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_percent=10)
def test_create_host_config_invalid_nano_cpus_types(self):
with pytest.raises(TypeError):
@@ -201,23 +173,29 @@ class HostConfigTest(unittest.TestCase):
def test_create_host_config_with_nano_cpus(self):
config = create_host_config(version='1.25', nano_cpus=1000)
- self.assertEqual(config.get('NanoCpus'), 1000)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.24', nano_cpus=1))
-
-
-class ContainerConfigTest(unittest.TestCase):
- def test_create_container_config_volume_driver_warning(self):
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter('always')
- ContainerConfig(
- version='1.21', image='scratch', command=None,
- volume_driver='local'
- )
+ assert config.get('NanoCpus') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', nano_cpus=1)
+
+ def test_create_host_config_with_cpu_rt_period_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_rt_period='1000')
- assert len(w) == 1
- assert 'The volume_driver option has been moved' in str(w[0].message)
+ def test_create_host_config_with_cpu_rt_period(self):
+ config = create_host_config(version='1.25', cpu_rt_period=1000)
+ assert config.get('CPURealtimePeriod') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_rt_period=1000)
+
+ def test_ctrate_host_config_with_cpu_rt_runtime_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_rt_runtime='1000')
+
+ def test_create_host_config_with_cpu_rt_runtime(self):
+ config = create_host_config(version='1.25', cpu_rt_runtime=1000)
+ assert config.get('CPURealtimeRuntime') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_rt_runtime=1000)
class ContainerSpecTest(unittest.TestCase):
@@ -242,43 +220,46 @@ class UlimitTest(unittest.TestCase):
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj.name == ulimit_dct['name']
+ assert ulimit_obj.soft == ulimit_dct['soft']
+ assert ulimit_obj['Soft'] == ulimit_obj.soft
def test_create_host_config_dict_ulimit_capitals(self):
ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
- self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj.name == ulimit_dct['Name']
+ assert ulimit_obj.soft == ulimit_dct['Soft']
+ assert ulimit_obj.hard == ulimit_dct['Hard']
+ assert ulimit_obj['Soft'] == ulimit_obj.soft
def test_create_host_config_obj_ulimit(self):
ulimit_dct = Ulimit(name='nofile', soft=8096)
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj, ulimit_dct)
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj == ulimit_dct
def test_ulimit_invalid_type(self):
- self.assertRaises(ValueError, lambda: Ulimit(name=None))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
+ with pytest.raises(ValueError):
+ Ulimit(name=None)
+ with pytest.raises(ValueError):
+ Ulimit(name='hello', soft='123')
+ with pytest.raises(ValueError):
+ Ulimit(name='hello', hard='456')
class LogConfigTest(unittest.TestCase):
@@ -287,18 +268,18 @@ class LogConfigTest(unittest.TestCase):
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=dct
)
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(dct['type'], config['LogConfig'].type)
+ assert 'LogConfig' in config
+ assert isinstance(config['LogConfig'], LogConfig)
+ assert dct['type'] == config['LogConfig'].type
def test_create_host_config_obj_logconfig(self):
obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=obj
)
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(obj, config['LogConfig'])
+ assert 'LogConfig' in config
+ assert isinstance(config['LogConfig'], LogConfig)
+ assert obj == config['LogConfig']
def test_logconfig_invalid_config_type(self):
with pytest.raises(ValueError):
@@ -320,7 +301,7 @@ class IPAMConfigTest(unittest.TestCase):
gateway='192.168.52.254')
ipam_config = IPAMConfig(pool_configs=[ipam_pool])
- self.assertEqual(ipam_config, {
+ assert ipam_config == {
'Driver': 'default',
'Config': [{
'Subnet': '192.168.52.0/24',
@@ -328,7 +309,7 @@ class IPAMConfigTest(unittest.TestCase):
'AuxiliaryAddresses': None,
'IPRange': None,
}]
- })
+ }
class ServiceModeTest(unittest.TestCase):
@@ -413,3 +394,77 @@ class MountTest(unittest.TestCase):
assert mount['Source'] == "C:/foo/bar"
assert mount['Target'] == "/baz"
assert mount['Type'] == 'bind'
+
+
+class ServicePortsTest(unittest.TestCase):
+ def test_convert_service_ports_simple(self):
+ ports = {8080: 80}
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ }]
+
+ def test_convert_service_ports_with_protocol(self):
+ ports = {8080: (80, 'udp')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'udp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ }]
+
+ def test_convert_service_ports_with_protocol_and_mode(self):
+ ports = {8080: (80, 'udp', 'ingress')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'udp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'ingress',
+ }]
+
+ def test_convert_service_ports_invalid(self):
+ ports = {8080: ('way', 'too', 'many', 'items', 'here')}
+
+ with pytest.raises(ValueError):
+ convert_service_ports(ports)
+
+ def test_convert_service_ports_no_protocol_and_mode(self):
+ ports = {8080: (80, None, 'host')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'host',
+ }]
+
+ def test_convert_service_ports_multiple(self):
+ ports = {
+ 8080: (80, None, 'host'),
+ 9999: 99,
+ 2375: (2375,)
+ }
+
+ converted_ports = convert_service_ports(ports)
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'host',
+ } in converted_ports
+
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 9999,
+ 'TargetPort': 99,
+ } in converted_ports
+
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 2375,
+ 'TargetPort': 2375,
+ } in converted_ports
+
+ assert len(converted_ports) == 3
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
index 9678669..e27a9b1 100644
--- a/tests/unit/errors_test.py
+++ b/tests/unit/errors_test.py
@@ -3,7 +3,8 @@ import unittest
import requests
from docker.errors import (APIError, ContainerError, DockerException,
- create_unexpected_kwargs_error)
+ create_unexpected_kwargs_error,
+ create_api_error_from_http_exception)
from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID
from .fake_api_client import make_fake_client
@@ -78,6 +79,19 @@ class APIErrorTest(unittest.TestCase):
err = APIError('', response=resp)
assert err.is_client_error() is True
+ def test_create_error_from_exception(self):
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('')
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ try:
+ create_api_error_from_http_exception(e)
+ except APIError as e:
+ err = e
+ assert err.is_server_error() is True
+
class ContainerErrorTest(unittest.TestCase):
def test_container_without_stderr(self):
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index 2ba85bb..e609b64 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -21,21 +21,36 @@ FAKE_NODE_ID = '24ifsmvkjbyhk'
# for clarity and readability
-def get_fake_raw_version():
+def get_fake_version():
status_code = 200
response = {
- "ApiVersion": "1.18",
- "GitCommit": "fake-commit",
- "GoVersion": "go1.3.3",
- "Version": "1.5.0"
+ 'ApiVersion': '1.35',
+ 'Arch': 'amd64',
+ 'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
+ 'Components': [{
+ 'Details': {
+ 'ApiVersion': '1.35',
+ 'Arch': 'amd64',
+ 'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
+ 'Experimental': 'false',
+ 'GitCommit': '03596f5',
+ 'GoVersion': 'go1.9.2',
+ 'KernelVersion': '4.4.0-112-generic',
+ 'MinAPIVersion': '1.12',
+ 'Os': 'linux'
+ },
+ 'Name': 'Engine',
+ 'Version': '18.01.0-ce'
+ }],
+ 'GitCommit': '03596f5',
+ 'GoVersion': 'go1.9.2',
+ 'KernelVersion': '4.4.0-112-generic',
+ 'MinAPIVersion': '1.12',
+ 'Os': 'linux',
+ 'Platform': {'Name': ''},
+ 'Version': '18.01.0-ce'
}
- return status_code, response
-
-def get_fake_version():
- status_code = 200
- response = {'GoVersion': '1', 'Version': '1.1.1',
- 'GitCommit': 'deadbeef+CHANGES'}
return status_code, response
@@ -205,7 +220,9 @@ def get_fake_wait():
def get_fake_logs():
status_code = 200
- response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
+ response = (b'\x01\x00\x00\x00\x00\x00\x00\x00'
+ b'\x02\x00\x00\x00\x00\x00\x00\x00'
+ b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
return status_code, response
@@ -435,6 +452,10 @@ def post_fake_update_node():
return 200, None
+def post_fake_join_swarm():
+ return 200, None
+
+
def get_fake_network_list():
return 200, [{
"Name": "bridge",
@@ -491,13 +512,13 @@ def post_fake_network_disconnect():
# Maps real api url to fake response callback
-prefix = 'http+docker://localunixsocket'
+prefix = 'http+docker://localhost'
if constants.IS_WINDOWS_PLATFORM:
prefix = 'http+docker://localnpipe'
fake_responses = {
'{0}/version'.format(prefix):
- get_fake_raw_version,
+ get_fake_version,
'{1}/{0}/version'.format(CURRENT_VERSION, prefix):
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
@@ -599,6 +620,8 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_NODE_ID
), 'POST'):
post_fake_update_node,
+ ('{1}/{0}/swarm/join'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_join_swarm,
('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
get_fake_network_list,
('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
index 47890ac..15b60ea 100644
--- a/tests/unit/fake_api_client.py
+++ b/tests/unit/fake_api_client.py
@@ -43,10 +43,10 @@ def make_fake_api_client():
fake_api.get_fake_inspect_container()[1],
'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
'inspect_network.return_value': fake_api.get_fake_network()[1],
- 'logs.return_value': 'hello world\n',
+ 'logs.return_value': [b'hello world\n'],
'networks.return_value': fake_api.get_fake_network_list()[1],
'start.return_value': None,
- 'wait.return_value': 0,
+ 'wait.return_value': {'StatusCode': 0},
})
mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
return mock_client
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index 5eaa45a..2b0b499 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -1,10 +1,12 @@
import docker
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
from docker.models.containers import Container, _create_container_args
from docker.models.images import Image
import unittest
from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID, FAKE_EXEC_ID
from .fake_api_client import make_fake_client
+import pytest
class ContainerCollectionTest(unittest.TestCase):
@@ -12,7 +14,7 @@ class ContainerCollectionTest(unittest.TestCase):
client = make_fake_client()
out = client.containers.run("alpine", "echo hello world")
- assert out == 'hello world\n'
+ assert out == b'hello world\n'
client.api.create_container.assert_called_with(
image="alpine",
@@ -24,9 +26,8 @@ class ContainerCollectionTest(unittest.TestCase):
client.api.start.assert_called_with(FAKE_CONTAINER_ID)
client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
client.api.logs.assert_called_with(
- FAKE_CONTAINER_ID,
- stderr=False,
- stdout=True
+ FAKE_CONTAINER_ID, stderr=False, stdout=True, stream=True,
+ follow=True
)
def test_create_container_args(self):
@@ -102,6 +103,7 @@ class ContainerCollectionTest(unittest.TestCase):
'volumename:/mnt/vol3',
'/volumewithnohostpath',
'/anothervolumewithnohostpath:ro',
+ 'C:\\windows\\path:D:\\hello\\world:rw'
],
volumes_from=['container'],
working_dir='/code'
@@ -120,7 +122,8 @@ class ContainerCollectionTest(unittest.TestCase):
'/var/www:/mnt/vol1:ro',
'volumename:/mnt/vol3',
'/volumewithnohostpath',
- '/anothervolumewithnohostpath:ro'
+ '/anothervolumewithnohostpath:ro',
+ 'C:\\windows\\path:D:\\hello\\world:rw'
],
'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
@@ -191,7 +194,8 @@ class ContainerCollectionTest(unittest.TestCase):
'/mnt/vol1',
'/mnt/vol3',
'/volumewithnohostpath',
- '/anothervolumewithnohostpath'
+ '/anothervolumewithnohostpath',
+ 'D:\\hello\\world'
],
working_dir='/code'
)
@@ -226,17 +230,17 @@ class ContainerCollectionTest(unittest.TestCase):
container = client.containers.run('alpine', 'sleep 300', detach=True)
assert container.id == FAKE_CONTAINER_ID
- client.api.pull.assert_called_with('alpine', tag=None)
+ client.api.pull.assert_called_with('alpine', platform=None, tag=None)
def test_run_with_error(self):
client = make_fake_client()
client.api.logs.return_value = "some error"
- client.api.wait.return_value = 1
+ client.api.wait.return_value = {'StatusCode': 1}
- with self.assertRaises(docker.errors.ContainerError) as cm:
+ with pytest.raises(docker.errors.ContainerError) as cm:
client.containers.run('alpine', 'echo hello world')
- assert cm.exception.exit_status == 1
- assert "some error" in str(cm.exception)
+ assert cm.value.exit_status == 1
+ assert "some error" in cm.exconly()
def test_run_with_image_object(self):
client = make_fake_client()
@@ -257,8 +261,8 @@ class ContainerCollectionTest(unittest.TestCase):
client.api.remove_container.assert_not_called()
client = make_fake_client()
- client.api.wait.return_value = 1
- with self.assertRaises(docker.errors.ContainerError):
+ client.api.wait.return_value = {'StatusCode': 1}
+ with pytest.raises(docker.errors.ContainerError):
client.containers.run("alpine")
client.api.remove_container.assert_not_called()
@@ -267,19 +271,19 @@ class ContainerCollectionTest(unittest.TestCase):
client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
client = make_fake_client()
- client.api.wait.return_value = 1
- with self.assertRaises(docker.errors.ContainerError):
+ client.api.wait.return_value = {'StatusCode': 1}
+ with pytest.raises(docker.errors.ContainerError):
client.containers.run("alpine", remove=True)
client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
client = make_fake_client()
client.api._version = '1.24'
- with self.assertRaises(RuntimeError):
+ with pytest.raises(RuntimeError):
client.containers.run("alpine", detach=True, remove=True)
client = make_fake_client()
client.api._version = '1.23'
- with self.assertRaises(RuntimeError):
+ with pytest.raises(RuntimeError):
client.containers.run("alpine", detach=True, remove=True)
client = make_fake_client()
@@ -395,23 +399,41 @@ class ContainerTest(unittest.TestCase):
container.exec_run("echo hello world", privileged=True, stream=True)
client.api.exec_create.assert_called_with(
FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
- stdin=False, tty=False, privileged=True, user='', environment=None
+ stdin=False, tty=False, privileged=True, user='', environment=None,
+ workdir=None
)
client.api.exec_start.assert_called_with(
FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
)
+ def test_exec_run_failure(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("docker ps", privileged=True, stream=False)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user='', environment=None,
+ workdir=None
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False
+ )
+
def test_export(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.export()
- client.api.export.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.export.assert_called_with(
+ FAKE_CONTAINER_ID, DEFAULT_DATA_CHUNK_SIZE
+ )
def test_get_archive(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.get_archive('foo')
- client.api.get_archive.assert_called_with(FAKE_CONTAINER_ID, 'foo')
+ client.api.get_archive.assert_called_with(
+ FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
+ )
def test_image(self):
client = make_fake_client()
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
index 9ecb7e4..6783279 100644
--- a/tests/unit/models_images_test.py
+++ b/tests/unit/models_images_test.py
@@ -1,3 +1,4 @@
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
from docker.models.images import Image
import unittest
@@ -41,9 +42,22 @@ class ImageCollectionTest(unittest.TestCase):
def test_pull(self):
client = make_fake_client()
- image = client.images.pull('test_image')
+ image = client.images.pull('test_image:latest')
+ client.api.pull.assert_called_with('test_image', tag='latest')
+ client.api.inspect_image.assert_called_with('test_image:latest')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_pull_multiple(self):
+ client = make_fake_client()
+ images = client.images.pull('test_image')
client.api.pull.assert_called_with('test_image', tag=None)
- client.api.inspect_image.assert_called_with('test_image')
+ client.api.images.assert_called_with(
+ all=False, name='test_image', filters=None
+ )
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert len(images) == 1
+ image = images[0]
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
@@ -103,7 +117,9 @@ class ImageTest(unittest.TestCase):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.save()
- client.api.get_image.assert_called_with(FAKE_IMAGE_ID)
+ client.api.get_image.assert_called_with(
+ FAKE_IMAGE_ID, DEFAULT_DATA_CHUNK_SIZE
+ )
def test_tag(self):
client = make_fake_client()
diff --git a/tests/unit/models_networks_test.py b/tests/unit/models_networks_test.py
index 943b904..58c9fce 100644
--- a/tests/unit/models_networks_test.py
+++ b/tests/unit/models_networks_test.py
@@ -4,7 +4,7 @@ from .fake_api import FAKE_NETWORK_ID, FAKE_CONTAINER_ID
from .fake_api_client import make_fake_client
-class ImageCollectionTest(unittest.TestCase):
+class NetworkCollectionTest(unittest.TestCase):
def test_create(self):
client = make_fake_client()
@@ -37,7 +37,7 @@ class ImageCollectionTest(unittest.TestCase):
assert client.api.networks.called_once_with(names=["foobar"])
-class ImageTest(unittest.TestCase):
+class NetworkTest(unittest.TestCase):
def test_connect(self):
client = make_fake_client()
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
index e7e317d..247bb4a 100644
--- a/tests/unit/models_services_test.py
+++ b/tests/unit/models_services_test.py
@@ -35,18 +35,18 @@ class CreateServiceKwargsTest(unittest.TestCase):
'labels': {'key': 'value'},
'mode': 'global',
'update_config': {'update': 'config'},
- 'networks': ['somenet'],
'endpoint_spec': {'blah': 'blah'},
}
assert set(task_template.keys()) == set([
'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
- 'LogDriver'
+ 'LogDriver', 'Networks'
])
assert task_template['Placement'] == {'Constraints': ['foo=bar']}
assert task_template['LogDriver'] == {
'Name': 'logdriver',
'Options': {'foo': 'bar'}
}
+ assert task_template['Networks'] == [{'Target': 'somenet'}]
assert set(task_template['ContainerSpec'].keys()) == set([
'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
'Labels', 'Mounts', 'StopGracePeriod'
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
index 2b7ce52..73b7336 100644
--- a/tests/unit/ssladapter_test.py
+++ b/tests/unit/ssladapter_test.py
@@ -1,5 +1,6 @@
import unittest
from docker.transport import ssladapter
+import pytest
try:
from backports.ssl_match_hostname import (
@@ -69,11 +70,9 @@ class MatchHostnameTest(unittest.TestCase):
assert match_hostname(self.cert, 'touhou.gensokyo.jp') is None
def test_match_ip_address_failure(self):
- self.assertRaises(
- CertificateError, match_hostname, self.cert, '192.168.0.25'
- )
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, '192.168.0.25')
def test_match_dns_failure(self):
- self.assertRaises(
- CertificateError, match_hostname, self.cert, 'foobar.co.uk'
- )
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, 'foobar.co.uk')
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
index 374f8b2..4385380 100644
--- a/tests/unit/swarm_test.py
+++ b/tests/unit/swarm_test.py
@@ -21,12 +21,51 @@ class SwarmTest(BaseAPIClientTest):
node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'nodes/24ifsmvkjbyhk/update?version=1'
+ assert args[0][1] == (
+ url_prefix + 'nodes/24ifsmvkjbyhk/update?version=1'
)
- self.assertEqual(
- json.loads(args[1]['data']), node_spec
- )
- self.assertEqual(
- args[1]['headers']['Content-Type'], 'application/json'
+ assert json.loads(args[1]['data']) == node_spec
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+
+ @requires_api_version('1.24')
+ def test_join_swarm(self):
+ remote_addr = ['1.2.3.4:2377']
+ listen_addr = '2.3.4.5:2377'
+ join_token = 'A_BEAUTIFUL_JOIN_TOKEN'
+
+ data = {
+ 'RemoteAddrs': remote_addr,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token
+ }
+
+ self.client.join_swarm(
+ remote_addrs=remote_addr,
+ listen_addr=listen_addr,
+ join_token=join_token
)
+
+ args = fake_request.call_args
+
+ assert (args[0][1] == url_prefix + 'swarm/join')
+ assert (json.loads(args[1]['data']) == data)
+ assert (args[1]['headers']['Content-Type'] == 'application/json')
+
+ @requires_api_version('1.24')
+ def test_join_swarm_no_listen_address_takes_default(self):
+ remote_addr = ['1.2.3.4:2377']
+ join_token = 'A_BEAUTIFUL_JOIN_TOKEN'
+
+ data = {
+ 'RemoteAddrs': remote_addr,
+ 'ListenAddr': '0.0.0.0:2377',
+ 'JoinToken': join_token
+ }
+
+ self.client.join_swarm(remote_addrs=remote_addr, join_token=join_token)
+
+ args = fake_request.call_args
+
+ assert (args[0][1] == url_prefix + 'swarm/join')
+ assert (json.loads(args[1]['data']) == data)
+ assert (args[1]['headers']['Content-Type'] == 'application/json')
diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py
new file mode 100644
index 0000000..50ba383
--- /dev/null
+++ b/tests/unit/utils_config_test.py
@@ -0,0 +1,123 @@
+import os
+import unittest
+import shutil
+import tempfile
+import json
+
+from py.test import ensuretemp
+from pytest import mark
+from docker.utils import config
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class FindConfigFileTest(unittest.TestCase):
+ def tmpdir(self, name):
+ tmpdir = ensuretemp(name)
+ self.addCleanup(tmpdir.remove)
+ return tmpdir
+
+ def test_find_config_fallback(self):
+ tmpdir = self.tmpdir('test_find_config_fallback')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() is None
+
+ def test_find_config_from_explicit_path(self):
+ tmpdir = self.tmpdir('test_find_config_from_explicit_path')
+ config_path = tmpdir.ensure('my-config-file.json')
+
+ assert config.find_config_file(str(config_path)) == str(config_path)
+
+ def test_find_config_from_environment(self):
+ tmpdir = self.tmpdir('test_find_config_from_environment')
+ config_path = tmpdir.ensure('config.json')
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_posix(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_posix')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_legacy_name(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
+ config_path = tmpdir.ensure('.dockercfg')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform != 'win32'")
+ def test_find_config_from_home_windows(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_windows')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+
+class LoadConfigTest(unittest.TestCase):
+ def test_load_config_no_file(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg = config.load_general_config(folder)
+ assert cfg is not None
+ assert isinstance(cfg, dict)
+ assert not cfg
+
+ def test_load_config_custom_headers(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'HttpHeaders': {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ },
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert 'HttpHeaders' in cfg
+ assert cfg['HttpHeaders'] == {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ }
+
+ def test_load_config_detach_keys(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert cfg == config_data
+
+ def test_load_config_from_env(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = config.load_general_config(None)
+ assert cfg == config_data
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 2fa1d05..00456e8 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -23,7 +23,6 @@ from docker.utils import (
decode_json_header, tar, split_command, parse_devices, update_headers,
)
-from docker.utils.build import should_check_directory
from docker.utils.ports import build_port_bindings, split_port
from docker.utils.utils import format_environment
@@ -46,7 +45,7 @@ class DecoratorsTest(unittest.TestCase):
return headers
client = APIClient()
- client._auth_configs = {}
+ client._general_configs = {}
g = update_headers(f)
assert g(client, headers=None) is None
@@ -55,7 +54,7 @@ class DecoratorsTest(unittest.TestCase):
'Content-type': 'application/json',
}
- client._auth_configs = {
+ client._general_configs = {
'HttpHeaders': sample_headers
}
@@ -80,25 +79,25 @@ class KwargsFromEnvTest(unittest.TestCase):
os.environ.pop('DOCKER_TLS_VERIFY', None)
kwargs = kwargs_from_env()
- self.assertEqual(None, kwargs.get('base_url'))
- self.assertEqual(None, kwargs.get('tls'))
+ assert kwargs.get('base_url') is None
+ assert kwargs.get('tls') is None
def test_kwargs_from_env_tls(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
- self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
- self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
- self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
- self.assertTrue('key.pem' in kwargs['tls'].cert[1])
- self.assertEqual(False, kwargs['tls'].assert_hostname)
- self.assertTrue(kwargs['tls'].verify)
+ assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is False
+ assert kwargs['tls'].verify
try:
client = APIClient(**kwargs)
- self.assertEqual(kwargs['base_url'], client.base_url)
- self.assertEqual(kwargs['tls'].ca_cert, client.verify)
- self.assertEqual(kwargs['tls'].cert, client.cert)
+ assert kwargs['base_url'] == client.base_url
+ assert kwargs['tls'].ca_cert == client.verify
+ assert kwargs['tls'].cert == client.cert
except TypeError as e:
self.fail(e)
@@ -107,17 +106,17 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env(assert_hostname=True)
- self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
- self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
- self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
- self.assertTrue('key.pem' in kwargs['tls'].cert[1])
- self.assertEqual(True, kwargs['tls'].assert_hostname)
- self.assertEqual(False, kwargs['tls'].verify)
+ assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is True
+ assert kwargs['tls'].verify is False
try:
client = APIClient(**kwargs)
- self.assertEqual(kwargs['base_url'], client.base_url)
- self.assertEqual(kwargs['tls'].cert, client.cert)
- self.assertFalse(kwargs['tls'].verify)
+ assert kwargs['base_url'] == client.base_url
+ assert kwargs['tls'].cert == client.cert
+ assert not kwargs['tls'].verify
except TypeError as e:
self.fail(e)
@@ -131,7 +130,7 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_TLS_VERIFY='')
os.environ.pop('DOCKER_CERT_PATH', None)
kwargs = kwargs_from_env(assert_hostname=True)
- self.assertEqual('tcp://192.168.59.103:2376', kwargs['base_url'])
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
def test_kwargs_from_env_no_cert_path(self):
try:
@@ -144,10 +143,10 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env()
- self.assertTrue(kwargs['tls'].verify)
- self.assertIn(cert_dir, kwargs['tls'].ca_cert)
- self.assertIn(cert_dir, kwargs['tls'].cert[0])
- self.assertIn(cert_dir, kwargs['tls'].cert[1])
+ assert kwargs['tls'].verify
+ assert cert_dir in kwargs['tls'].ca_cert
+ assert cert_dir in kwargs['tls'].cert[0]
+ assert cert_dir in kwargs['tls'].cert[1]
finally:
if temp_dir:
shutil.rmtree(temp_dir)
@@ -169,12 +168,12 @@ class KwargsFromEnvTest(unittest.TestCase):
class ConverVolumeBindsTest(unittest.TestCase):
def test_convert_volume_binds_empty(self):
- self.assertEqual(convert_volume_binds({}), [])
- self.assertEqual(convert_volume_binds([]), [])
+ assert convert_volume_binds({}) == []
+ assert convert_volume_binds([]) == []
def test_convert_volume_binds_list(self):
data = ['/a:/a:ro', '/b:/c:z']
- self.assertEqual(convert_volume_binds(data), data)
+ assert convert_volume_binds(data) == data
def test_convert_volume_binds_complete(self):
data = {
@@ -183,13 +182,13 @@ class ConverVolumeBindsTest(unittest.TestCase):
'mode': 'ro'
}
}
- self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro'])
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:ro']
def test_convert_volume_binds_compact(self):
data = {
'/mnt/vol1': '/data'
}
- self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
def test_convert_volume_binds_no_mode(self):
data = {
@@ -197,7 +196,7 @@ class ConverVolumeBindsTest(unittest.TestCase):
'bind': '/data'
}
}
- self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
def test_convert_volume_binds_unicode_bytes_input(self):
expected = [u'/mnt/지연:/unicode/박:rw']
@@ -208,9 +207,7 @@ class ConverVolumeBindsTest(unittest.TestCase):
'mode': 'rw'
}
}
- self.assertEqual(
- convert_volume_binds(data), expected
- )
+ assert convert_volume_binds(data) == expected
def test_convert_volume_binds_unicode_unicode_input(self):
expected = [u'/mnt/지연:/unicode/박:rw']
@@ -221,9 +218,7 @@ class ConverVolumeBindsTest(unittest.TestCase):
'mode': 'rw'
}
}
- self.assertEqual(
- convert_volume_binds(data), expected
- )
+ assert convert_volume_binds(data) == expected
class ParseEnvFileTest(unittest.TestCase):
@@ -242,38 +237,35 @@ class ParseEnvFileTest(unittest.TestCase):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
- self.assertEqual(get_parse_env_file,
- {'USER': 'jdoe', 'PASS': 'secret'})
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
os.unlink(env_file)
def test_parse_env_file_with_equals_character(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=sec==ret')
get_parse_env_file = parse_env_file(env_file)
- self.assertEqual(get_parse_env_file,
- {'USER': 'jdoe', 'PASS': 'sec==ret'})
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'sec==ret'}
os.unlink(env_file)
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
get_parse_env_file = parse_env_file(env_file)
- self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
+ assert get_parse_env_file == {'USER': 'jdoe'}
os.unlink(env_file)
def test_parse_env_file_newline(self):
env_file = self.generate_tempfile(
file_content='\nUSER=jdoe\n\n\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
- self.assertEqual(get_parse_env_file,
- {'USER': 'jdoe', 'PASS': 'secret'})
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
os.unlink(env_file)
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
- self.assertRaises(
- DockerException, parse_env_file, env_file)
+ with pytest.raises(DockerException):
+ parse_env_file(env_file)
os.unlink(env_file)
@@ -343,46 +335,34 @@ class ParseRepositoryTagTest(unittest.TestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
def test_index_image_no_tag(self):
- self.assertEqual(
- parse_repository_tag("root"), ("root", None)
- )
+ assert parse_repository_tag("root") == ("root", None)
def test_index_image_tag(self):
- self.assertEqual(
- parse_repository_tag("root:tag"), ("root", "tag")
- )
+ assert parse_repository_tag("root:tag") == ("root", "tag")
def test_index_user_image_no_tag(self):
- self.assertEqual(
- parse_repository_tag("user/repo"), ("user/repo", None)
- )
+ assert parse_repository_tag("user/repo") == ("user/repo", None)
def test_index_user_image_tag(self):
- self.assertEqual(
- parse_repository_tag("user/repo:tag"), ("user/repo", "tag")
- )
+ assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag")
def test_private_reg_image_no_tag(self):
- self.assertEqual(
- parse_repository_tag("url:5000/repo"), ("url:5000/repo", None)
- )
+ assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", None)
def test_private_reg_image_tag(self):
- self.assertEqual(
- parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag")
+ assert parse_repository_tag("url:5000/repo:tag") == (
+ "url:5000/repo", "tag"
)
def test_index_image_sha(self):
- self.assertEqual(
- parse_repository_tag("root@sha256:{0}".format(self.sha)),
- ("root", "sha256:{0}".format(self.sha))
+ assert parse_repository_tag("root@sha256:{0}".format(self.sha)) == (
+ "root", "sha256:{0}".format(self.sha)
)
def test_private_reg_image_sha(self):
- self.assertEqual(
- parse_repository_tag("url:5000/repo@sha256:{0}".format(self.sha)),
- ("url:5000/repo", "sha256:{0}".format(self.sha))
- )
+ assert parse_repository_tag(
+ "url:5000/repo@sha256:{0}".format(self.sha)
+ ) == ("url:5000/repo", "sha256:{0}".format(self.sha))
class ParseDeviceTest(unittest.TestCase):
@@ -392,35 +372,35 @@ class ParseDeviceTest(unittest.TestCase):
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
}])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
- })
+ }
def test_partial_string_definition(self):
devices = parse_devices(['/dev/sda1'])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/sda1',
'CgroupPermissions': 'rwm'
- })
+ }
def test_permissionless_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1'])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rwm'
- })
+ }
def test_full_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1:r'])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
- })
+ }
def test_hybrid_list(self):
devices = parse_devices([
@@ -432,36 +412,38 @@ class ParseDeviceTest(unittest.TestCase):
}
])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rw'
- })
- self.assertEqual(devices[1], {
+ }
+ assert devices[1] == {
'PathOnHost': '/dev/sda2',
'PathInContainer': '/dev/mnt2',
'CgroupPermissions': 'r'
- })
+ }
class ParseBytesTest(unittest.TestCase):
def test_parse_bytes_valid(self):
- self.assertEqual(parse_bytes("512MB"), 536870912)
- self.assertEqual(parse_bytes("512M"), 536870912)
- self.assertEqual(parse_bytes("512m"), 536870912)
+ assert parse_bytes("512MB") == 536870912
+ assert parse_bytes("512M") == 536870912
+ assert parse_bytes("512m") == 536870912
def test_parse_bytes_invalid(self):
- self.assertRaises(DockerException, parse_bytes, "512MK")
- self.assertRaises(DockerException, parse_bytes, "512L")
- self.assertRaises(DockerException, parse_bytes, "127.0.0.1K")
+ with pytest.raises(DockerException):
+ parse_bytes("512MK")
+ with pytest.raises(DockerException):
+ parse_bytes("512L")
+ with pytest.raises(DockerException):
+ parse_bytes("127.0.0.1K")
def test_parse_bytes_float(self):
- self.assertRaises(DockerException, parse_bytes, "1.5k")
+ with pytest.raises(DockerException):
+ parse_bytes("1.5k")
def test_parse_bytes_maxint(self):
- self.assertEqual(
- parse_bytes("{0}k".format(sys.maxsize)), sys.maxsize * 1024
- )
+ assert parse_bytes("{0}k".format(sys.maxsize)) == sys.maxsize * 1024
class UtilsTest(unittest.TestCase):
@@ -476,7 +458,7 @@ class UtilsTest(unittest.TestCase):
]
for filters, expected in tests:
- self.assertEqual(convert_filters(filters), expected)
+ assert convert_filters(filters) == expected
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
@@ -486,144 +468,144 @@ class UtilsTest(unittest.TestCase):
else:
data = base64.urlsafe_b64encode(json.dumps(obj))
decoded_data = decode_json_header(data)
- self.assertEqual(obj, decoded_data)
+ assert obj == decoded_data
class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
- self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
+ assert split_command(u'echo μμ') == ['echo', 'μμ']
@pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
def test_split_command_with_bytes(self):
- self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
+ assert split_command('echo μμ') == ['echo', 'μμ']
class PortsTest(unittest.TestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, [("127.0.0.1", "1000")])
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_protocol(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
- self.assertEqual(internal_port, ["2000/udp"])
- self.assertEqual(external_port, [("127.0.0.1", "1000")])
+ assert internal_port == ["2000/udp"]
+ assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, [("127.0.0.1", None)])
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", None)]
def test_split_port_range_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000-2001")
- self.assertEqual(internal_port, ["2000", "2001"])
- self.assertEqual(external_port,
- [("127.0.0.1", None), ("127.0.0.1", None)])
+ assert internal_port == ["2000", "2001"]
+ assert external_port == [("127.0.0.1", None), ("127.0.0.1", None)]
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, ["1000"])
+ assert internal_port == ["2000"]
+ assert external_port == ["1000"]
def test_split_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000-2001")
- self.assertEqual(internal_port, ["2000", "2001"])
- self.assertEqual(external_port, ["1000", "1001"])
+ assert internal_port == ["2000", "2001"]
+ assert external_port == ["1000", "1001"]
def test_split_port_random_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, ["1000-1001"])
+ assert internal_port == ["2000"]
+ assert external_port == ["1000-1001"]
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, None)
+ assert internal_port == ["2000"]
+ assert external_port is None
def test_split_port_range_no_host_port(self):
internal_port, external_port = split_port("2000-2001")
- self.assertEqual(internal_port, ["2000", "2001"])
- self.assertEqual(external_port, None)
+ assert internal_port == ["2000", "2001"]
+ assert external_port is None
def test_split_port_range_with_protocol(self):
internal_port, external_port = split_port(
"127.0.0.1:1000-1001:2000-2001/udp")
- self.assertEqual(internal_port, ["2000/udp", "2001/udp"])
- self.assertEqual(external_port,
- [("127.0.0.1", "1000"), ("127.0.0.1", "1001")])
+ assert internal_port == ["2000/udp", "2001/udp"]
+ assert external_port == [("127.0.0.1", "1000"), ("127.0.0.1", "1001")]
def test_split_port_with_ipv6_address(self):
internal_port, external_port = split_port(
"2001:abcd:ef00::2:1000:2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, [("2001:abcd:ef00::2", "1000")])
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
def test_split_port_invalid(self):
- self.assertRaises(ValueError,
- lambda: split_port("0.0.0.0:1000:2000:tcp"))
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000:tcp")
def test_non_matching_length_port_ranges(self):
- self.assertRaises(
- ValueError,
- lambda: split_port("0.0.0.0:1000-1010:2000-2002/tcp")
- )
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000-1010:2000-2002/tcp")
def test_port_and_range_invalid(self):
- self.assertRaises(ValueError,
- lambda: split_port("0.0.0.0:1000:2000-2002/tcp"))
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000-2002/tcp")
def test_port_only_with_colon(self):
- self.assertRaises(ValueError,
- lambda: split_port(":80"))
+ with pytest.raises(ValueError):
+ split_port(":80")
def test_host_only_with_colon(self):
- self.assertRaises(ValueError,
- lambda: split_port("localhost:"))
+ with pytest.raises(ValueError):
+ split_port("localhost:")
def test_with_no_container_port(self):
- self.assertRaises(ValueError,
- lambda: split_port("localhost:80:"))
+ with pytest.raises(ValueError):
+ split_port("localhost:80:")
def test_split_port_empty_string(self):
- self.assertRaises(ValueError, lambda: split_port(""))
+ with pytest.raises(ValueError):
+ split_port("")
def test_split_port_non_string(self):
assert split_port(1243) == (['1243'], None)
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
- self.assertEqual(port_bindings["1000"],
- [("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
- self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
def test_build_port_bindings_with_port_range(self):
port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
- self.assertEqual(port_bindings["1001"], [("127.0.0.1", "1001")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["1001"] == [("127.0.0.1", "1001")]
def test_build_port_bindings_with_matching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
- self.assertEqual(port_bindings["1000"],
- [("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
- self.assertEqual(port_bindings["1001"],
- [("127.0.0.1", "1001"), ("127.0.0.1", "2001")])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
+ assert port_bindings["1001"] == [
+ ("127.0.0.1", "1001"), ("127.0.0.1", "2001")
+ ]
def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
- self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
def convert_paths(collection):
@@ -708,11 +690,18 @@ class ExcludePathsTest(unittest.TestCase):
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
- assert self.exclude(['*'], dockerfile='Dockerfile.alt') == \
- set(['Dockerfile.alt', '.dockerignore'])
+ assert self.exclude(['*'], dockerfile='Dockerfile.alt') == set(
+ ['Dockerfile.alt', '.dockerignore']
+ )
+
+ assert self.exclude(
+ ['*'], dockerfile='foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
- assert self.exclude(['*'], dockerfile='foo/Dockerfile3') == \
- convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+ # https://github.com/docker/docker-py/issues/1956
+ assert self.exclude(
+ ['*'], dockerfile='./foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
@@ -773,6 +762,13 @@ class ExcludePathsTest(unittest.TestCase):
self.all_paths - set(['foo/a.py'])
)
+ def test_exclude_include_absolute_path(self):
+ base = make_tree([], ['a.py', 'b.py'])
+ assert exclude_paths(
+ base,
+ ['/*', '!/*.py']
+ ) == set(['a.py', 'b.py'])
+
def test_single_subdir_with_path_traversal(self):
assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
self.all_paths - set(['foo/a.py'])
@@ -891,6 +887,42 @@ class ExcludePathsTest(unittest.TestCase):
)
)
+ def test_include_wildcard(self):
+ base = make_tree(['a'], ['a/b.py'])
+ assert exclude_paths(
+ base,
+ ['*', '!*/b.py']
+ ) == convert_paths(['a/b.py'])
+
+ def test_last_line_precedence(self):
+ base = make_tree(
+ [],
+ ['garbage.md',
+ 'thrash.md',
+ 'README.md',
+ 'README-bis.md',
+ 'README-secret.md'])
+ assert exclude_paths(
+ base,
+ ['*.md', '!README*.md', 'README-secret.md']
+ ) == set(['README.md', 'README-bis.md'])
+
+ def test_parent_directory(self):
+ base = make_tree(
+ [],
+ ['a.py',
+ 'b.py',
+ 'c.py'])
+ # Dockerignore reference stipulates that absolute paths are
+ # equivalent to relative paths, hence /../foo should be
+ # equivalent to ../foo. It also stipulates that paths are run
+ # through Go's filepath.Clean, which explicitely "replace
+ # "/.." by "/" at the beginning of a path".
+ assert exclude_paths(
+ base,
+ ['../a.py', '/../b.py']
+ ) == set(['c.py'])
+
class TarTest(unittest.TestCase):
def test_tar_with_excludes(self):
@@ -946,7 +978,25 @@ class TarTest(unittest.TestCase):
os.makedirs(os.path.join(base, d))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(sorted(tar_data.getnames()), ['bar', 'foo'])
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM or os.geteuid() == 0,
+ reason='root user always has access ; no chmod on Windows'
+ )
+ def test_tar_with_inaccessible_file(self):
+ base = tempfile.mkdtemp()
+ full_path = os.path.join(base, 'foo')
+ self.addCleanup(shutil.rmtree, base)
+ with open(full_path, 'w') as f:
+ f.write('content')
+ os.chmod(full_path, 0o222)
+ with pytest.raises(IOError) as ei:
+ tar(base)
+
+ assert 'Can not read file in context: {}'.format(full_path) in (
+ ei.exconly()
+ )
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
def test_tar_with_file_symlinks(self):
@@ -958,9 +1008,7 @@ class TarTest(unittest.TestCase):
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(
- sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
- )
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
def test_tar_with_directory_symlinks(self):
@@ -971,9 +1019,19 @@ class TarTest(unittest.TestCase):
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(
- sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
- )
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_broken_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+
+ os.symlink('../baz', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32')
def test_tar_socket_file(self):
@@ -986,72 +1044,34 @@ class TarTest(unittest.TestCase):
sock.bind(os.path.join(base, 'test.sock'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(
- sorted(tar_data.getnames()), ['bar', 'foo']
- )
-
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
-class ShouldCheckDirectoryTest(unittest.TestCase):
- exclude_patterns = [
- 'exclude_rather_large_directory',
- 'dir/with/subdir_excluded',
- 'dir/with/exceptions'
- ]
-
- include_patterns = [
- 'dir/with/exceptions/like_this_one',
- 'dir/with/exceptions/in/descendents'
- ]
-
- def test_should_check_directory_not_excluded(self):
- assert should_check_directory(
- 'not_excluded', self.exclude_patterns, self.include_patterns
- )
- assert should_check_directory(
- convert_path('dir/with'), self.exclude_patterns,
- self.include_patterns
- )
-
- def test_shoud_check_parent_directories_of_excluded(self):
- assert should_check_directory(
- 'dir', self.exclude_patterns, self.include_patterns
- )
- assert should_check_directory(
- convert_path('dir/with'), self.exclude_patterns,
- self.include_patterns
- )
-
- def test_should_not_check_excluded_directories_with_no_exceptions(self):
- assert not should_check_directory(
- 'exclude_rather_large_directory', self.exclude_patterns,
- self.include_patterns
- )
- assert not should_check_directory(
- convert_path('dir/with/subdir_excluded'), self.exclude_patterns,
- self.include_patterns
- )
-
- def test_should_check_excluded_directory_with_exceptions(self):
- assert should_check_directory(
- convert_path('dir/with/exceptions'), self.exclude_patterns,
- self.include_patterns
- )
- assert should_check_directory(
- convert_path('dir/with/exceptions/in'), self.exclude_patterns,
- self.include_patterns
- )
-
- def test_should_not_check_siblings_of_exceptions(self):
- assert not should_check_directory(
- convert_path('dir/with/exceptions/but_not_here'),
- self.exclude_patterns, self.include_patterns
- )
+ def tar_test_negative_mtime_bug(self):
+ base = tempfile.mkdtemp()
+ filename = os.path.join(base, 'th.txt')
+ self.addCleanup(shutil.rmtree, base)
+ with open(filename, 'w') as f:
+ f.write('Invisible Full Moon')
+ os.utime(filename, (12345, -3600.0))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert tar_data.getnames() == ['th.txt']
+ assert tar_data.getmember('th.txt').mtime == -3600
- def test_should_check_subdirectories_of_exceptions(self):
- assert should_check_directory(
- convert_path('dir/with/exceptions/like_this_one/subdir'),
- self.exclude_patterns, self.include_patterns
- )
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_directory_link(self):
+ dirs = ['a', 'b', 'a/c']
+ files = ['a/hello.py', 'b/utils.py', 'a/c/descend.py']
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+ os.symlink(os.path.join(base, 'b'), os.path.join(base, 'a/c/b'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ names = tar_data.getnames()
+ for member in dirs + files:
+ assert member in names
+ assert 'a/c/b' in names
+ assert 'a/c/b/utils.py' not in names
class FormatEnvironmentTest(unittest.TestCase):