summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--LICENSE13
-rw-r--r--MANIFEST.in1
-rw-r--r--PKG-INFO2
-rw-r--r--docker/__init__.py14
-rw-r--r--docker/api/__init__.py4
-rw-r--r--docker/api/build.py6
-rw-r--r--docker/api/container.py25
-rw-r--r--docker/api/exec_api.py14
-rw-r--r--docker/api/image.py216
-rw-r--r--docker/api/network.py35
-rw-r--r--docker/api/service.py105
-rw-r--r--docker/api/swarm.py78
-rw-r--r--docker/api/volume.py13
-rw-r--r--docker/auth/auth.py98
-rw-r--r--docker/client.py70
-rw-r--r--docker/constants.py3
-rw-r--r--docker/errors.py13
-rw-r--r--docker/transport/npipeconn.py10
-rw-r--r--docker/transport/npipesocket.py2
-rw-r--r--docker/transport/unixconn.py33
-rw-r--r--docker/types/__init__.py7
-rw-r--r--docker/types/base.py7
-rw-r--r--docker/types/containers.py92
-rw-r--r--docker/types/services.py181
-rw-r--r--docker/types/swarm.py40
-rw-r--r--docker/utils/__init__.py14
-rw-r--r--docker/utils/decorators.py4
-rw-r--r--docker/utils/socket.py68
-rw-r--r--docker/utils/types.py97
-rw-r--r--docker/utils/utils.py145
-rw-r--r--docker/version.py2
-rw-r--r--docker_py.egg-info/PKG-INFO2
-rw-r--r--docker_py.egg-info/SOURCES.txt10
-rw-r--r--docker_py.egg-info/requires.txt3
-rw-r--r--requirements.txt3
-rw-r--r--setup.cfg3
-rw-r--r--setup.py6
-rw-r--r--tests/helpers.py55
-rw-r--r--tests/integration/container_test.py48
-rw-r--r--tests/integration/exec_test.py28
-rw-r--r--tests/integration/image_test.py42
-rw-r--r--tests/integration/network_test.py78
-rw-r--r--tests/integration/service_test.py189
-rw-r--r--tests/integration/swarm_test.py145
-rw-r--r--tests/unit/api_test.py73
-rw-r--r--tests/unit/auth_test.py26
-rw-r--r--tests/unit/build_test.py61
-rw-r--r--tests/unit/client_test.py8
-rw-r--r--tests/unit/container_test.py117
-rw-r--r--tests/unit/exec_test.py32
-rw-r--r--tests/unit/fake_api.py50
-rw-r--r--tests/unit/image_test.py26
-rw-r--r--tests/unit/network_test.py2
-rw-r--r--tests/unit/utils_test.py96
-rw-r--r--tests/unit/volume_test.py16
55 files changed, 1972 insertions, 559 deletions
diff --git a/LICENSE b/LICENSE
index d645695..75191a4 100644
--- a/LICENSE
+++ b/LICENSE
@@ -176,18 +176,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/MANIFEST.in b/MANIFEST.in
index ee6cdbb..f492931 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
+include setup.cfg
include test-requirements.txt
include requirements.txt
include README.md
diff --git a/PKG-INFO b/PKG-INFO
index 79c1598..e468347 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: docker-py
-Version: 1.9.0
+Version: 1.10.3
Summary: Python client for Docker.
Home-page: https://github.com/docker/docker-py/
Author: UNKNOWN
diff --git a/docker/__init__.py b/docker/__init__.py
index 84d0734..ad53805 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,17 +1,3 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
from .version import version, version_info
__version__ = version
diff --git a/docker/api/__init__.py b/docker/api/__init__.py
index 9e74428..bc7e93c 100644
--- a/docker/api/__init__.py
+++ b/docker/api/__init__.py
@@ -4,5 +4,7 @@ from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
-from .volume import VolumeApiMixin
from .network import NetworkApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
diff --git a/docker/api/build.py b/docker/api/build.py
index 971a50e..7403716 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -18,7 +18,8 @@ class BuildApiMixin(object):
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False):
- remote = context = headers = None
+ remote = context = None
+ headers = {}
container_limits = container_limits or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
@@ -134,8 +135,7 @@ class BuildApiMixin(object):
', '.join(repr(k) for k in self._auth_configs.keys())
)
)
- if headers is None:
- headers = {}
+
if utils.compare_version('1.19', self._version) >= 0:
headers['X-Registry-Config'] = auth.encode_header(
self._auth_configs
diff --git a/docker/api/container.py b/docker/api/container.py
index 9cc14db..b8507d8 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -15,12 +15,18 @@ class ContainerApiMixin(object):
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
- 'stream': stream and 1 or 0,
+ 'stream': stream and 1 or 0
}
+
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
u = self._url("/containers/{0}/attach", container)
- response = self._post(u, params=params, stream=stream)
+ response = self._post(u, headers=headers, params=params, stream=stream)
- return self._get_result(container, stream, response)
+ return self._read_from_socket(response, stream)
@utils.check_resource
def attach_socket(self, container, params=None, ws=False):
@@ -34,9 +40,18 @@ class ContainerApiMixin(object):
if ws:
return self._attach_websocket(container, params)
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
u = self._url("/containers/{0}/attach", container)
- return self._get_raw_response_socket(self.post(
- u, None, params=self._attach_params(params), stream=True))
+ return self._get_raw_response_socket(
+ self.post(
+ u, None, params=self._attach_params(params), stream=True,
+ headers=headers
+ )
+ )
@utils.check_resource
def commit(self, container, repository=None, tag=None, message=None,
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index f0e4afa..6e49996 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -56,8 +56,6 @@ class ExecApiMixin(object):
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
# we want opened socket if socket == True
- if socket:
- stream = True
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@@ -66,10 +64,18 @@ class ExecApiMixin(object):
'Detach': detach
}
+ headers = {} if detach else {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
res = self._post_json(
- self._url('/exec/{0}/start', exec_id), data=data, stream=stream
+ self._url('/exec/{0}/start', exec_id),
+ headers=headers,
+ data=data,
+ stream=True
)
if socket:
return self._get_raw_response_socket(res)
- return self._get_result_tty(stream, res, tty)
+ return self._read_from_socket(res, stream)
diff --git a/docker/api/image.py b/docker/api/image.py
index 3e66347..7f25f9d 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,4 +1,5 @@
import logging
+import os
import six
import warnings
@@ -42,87 +43,79 @@ class ImageApiMixin(object):
return [x['Id'] for x in res]
return res
- def import_image(self, src=None, repository=None, tag=None, image=None):
- if src:
- if isinstance(src, six.string_types):
- try:
- result = self.import_image_from_file(
- src, repository=repository, tag=tag)
- except IOError:
- result = self.import_image_from_url(
- src, repository=repository, tag=tag)
- else:
- result = self.import_image_from_data(
- src, repository=repository, tag=tag)
- elif image:
- result = self.import_image_from_image(
- image, repository=repository, tag=tag)
- else:
- raise Exception("Must specify a src or image")
-
- return result
-
- def import_image_from_data(self, data, repository=None, tag=None):
- u = self._url("/images/create")
- params = {
- 'fromSrc': '-',
- 'repo': repository,
- 'tag': tag
- }
- headers = {
- 'Content-Type': 'application/tar',
- }
- return self._result(
- self._post(u, data=data, params=params, headers=headers))
+ def import_image(self, src=None, repository=None, tag=None, image=None,
+ changes=None, stream_src=False):
+ if not (src or image):
+ raise errors.DockerException(
+ 'Must specify src or image to import from'
+ )
+ u = self._url('/images/create')
- def import_image_from_file(self, filename, repository=None, tag=None):
- u = self._url("/images/create")
- params = {
- 'fromSrc': '-',
- 'repo': repository,
- 'tag': tag
- }
- headers = {
- 'Content-Type': 'application/tar',
- }
- with open(filename, 'rb') as f:
+ params = _import_image_params(
+ repository, tag, image,
+ src=(src if isinstance(src, six.string_types) else None),
+ changes=changes
+ )
+ headers = {'Content-Type': 'application/tar'}
+
+ if image or params.get('fromSrc') != '-': # from image or URL
+ return self._result(
+ self._post(u, data=None, params=params)
+ )
+ elif isinstance(src, six.string_types): # from file path
+ with open(src, 'rb') as f:
+ return self._result(
+ self._post(
+ u, data=f, params=params, headers=headers, timeout=None
+ )
+ )
+ else: # from raw data
+ if stream_src:
+ headers['Transfer-Encoding'] = 'chunked'
return self._result(
- self._post(u, data=f, params=params, headers=headers,
- timeout=None))
+ self._post(u, data=src, params=params, headers=headers)
+ )
- def import_image_from_stream(self, stream, repository=None, tag=None):
- u = self._url("/images/create")
- params = {
- 'fromSrc': '-',
- 'repo': repository,
- 'tag': tag
- }
- headers = {
- 'Content-Type': 'application/tar',
- 'Transfer-Encoding': 'chunked',
- }
+ def import_image_from_data(self, data, repository=None, tag=None,
+ changes=None):
+ u = self._url('/images/create')
+ params = _import_image_params(
+ repository, tag, src='-', changes=changes
+ )
+ headers = {'Content-Type': 'application/tar'}
return self._result(
- self._post(u, data=stream, params=params, headers=headers))
+ self._post(
+ u, data=data, params=params, headers=headers, timeout=None
+ )
+ )
+ return self.import_image(
+ src=data, repository=repository, tag=tag, changes=changes
+ )
- def import_image_from_url(self, url, repository=None, tag=None):
- u = self._url("/images/create")
- params = {
- 'fromSrc': url,
- 'repo': repository,
- 'tag': tag
- }
- return self._result(
- self._post(u, data=None, params=params))
+ def import_image_from_file(self, filename, repository=None, tag=None,
+ changes=None):
+ return self.import_image(
+ src=filename, repository=repository, tag=tag, changes=changes
+ )
- def import_image_from_image(self, image, repository=None, tag=None):
- u = self._url("/images/create")
- params = {
- 'fromImage': image,
- 'repo': repository,
- 'tag': tag
- }
- return self._result(
- self._post(u, data=None, params=params))
+ def import_image_from_stream(self, stream, repository=None, tag=None,
+ changes=None):
+ return self.import_image(
+ src=stream, stream_src=True, repository=repository, tag=tag,
+ changes=changes
+ )
+
+ def import_image_from_url(self, url, repository=None, tag=None,
+ changes=None):
+ return self.import_image(
+ src=url, repository=repository, tag=tag, changes=changes
+ )
+
+ def import_image_from_image(self, image, repository=None, tag=None,
+ changes=None):
+ return self.import_image(
+ image=image, repository=repository, tag=tag, changes=changes
+ )
@utils.check_resource
def insert(self, image, url, path):
@@ -166,28 +159,10 @@ class ImageApiMixin(object):
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
- # If we don't have any auth data so far, try reloading the config
- # file one more time in case anything showed up in there.
if auth_config is None:
- log.debug('Looking for auth config')
- if not self._auth_configs:
- log.debug(
- "No auth config in memory - loading from filesystem"
- )
- self._auth_configs = auth.load_config()
- authcfg = auth.resolve_authconfig(self._auth_configs, registry)
- # Do not fail here if no authentication exists for this
- # specific registry as we can have a readonly pull. Just
- # put the header if we can.
- if authcfg:
- log.debug('Found auth config')
- # auth_config needs to be a dict in the format used by
- # auth.py username , password, serveraddress, email
- headers['X-Registry-Auth'] = auth.encode_header(
- authcfg
- )
- else:
- log.debug('No auth config found')
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
@@ -205,7 +180,7 @@ class ImageApiMixin(object):
return self._result(response)
def push(self, repository, tag=None, stream=False,
- insecure_registry=False, decode=False):
+ insecure_registry=False, auth_config=None, decode=False):
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
@@ -222,17 +197,13 @@ class ImageApiMixin(object):
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
- # If we don't have any auth data so far, try reloading the config
- # file one more time in case anything showed up in there.
- if not self._auth_configs:
- self._auth_configs = auth.load_config()
- authcfg = auth.resolve_authconfig(self._auth_configs, registry)
-
- # Do not fail here if no authentication exists for this specific
- # registry as we can have a readonly pull. Just put the header if
- # we can.
- if authcfg:
- headers['X-Registry-Auth'] = auth.encode_header(authcfg)
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
@@ -268,3 +239,32 @@ class ImageApiMixin(object):
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
+
+
+def is_file(src):
+ try:
+ return (
+ isinstance(src, six.string_types) and
+ os.path.isfile(src)
+ )
+ except TypeError: # a data string will make isfile() raise a TypeError
+ return False
+
+
+def _import_image_params(repo, tag, image=None, src=None,
+ changes=None):
+ params = {
+ 'repo': repo,
+ 'tag': tag,
+ }
+ if image:
+ params['fromImage'] = image
+ elif src and not is_file(src):
+ params['fromSrc'] = src
+ else:
+ params['fromSrc'] = '-'
+
+ if changes:
+ params['changes'] = changes
+
+ return params
diff --git a/docker/api/network.py b/docker/api/network.py
index a35f0a4..0ee0dab 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -22,7 +22,8 @@ class NetworkApiMixin(object):
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
- check_duplicate=None, internal=False):
+ check_duplicate=None, internal=False, labels=None,
+ enable_ipv6=False):
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@@ -34,6 +35,22 @@ class NetworkApiMixin(object):
'CheckDuplicate': check_duplicate
}
+ if labels is not None:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'network labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ if enable_ipv6:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'enable_ipv6 was introduced in API 1.23'
+ )
+ data['EnableIPv6'] = True
+
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
@@ -60,12 +77,13 @@ class NetworkApiMixin(object):
@minimum_version('1.21')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
- aliases=None, links=None):
+ aliases=None, links=None,
+ link_local_ips=None):
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
- ipv6_address=ipv6_address
+ ipv6_address=ipv6_address, link_local_ips=link_local_ips
),
}
@@ -75,8 +93,15 @@ class NetworkApiMixin(object):
@check_resource
@minimum_version('1.21')
- def disconnect_container_from_network(self, container, net_id):
- data = {"container": container}
+ def disconnect_container_from_network(self, container, net_id,
+ force=False):
+ data = {"Container": container}
+ if force:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion(
+ 'Forced disconnect was introduced in API 1.22'
+ )
+ data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
diff --git a/docker/api/service.py b/docker/api/service.py
new file mode 100644
index 0000000..baebbad
--- /dev/null
+++ b/docker/api/service.py
@@ -0,0 +1,105 @@
+from .. import errors
+from .. import utils
+from ..auth import auth
+
+
+class ServiceApiMixin(object):
+ @utils.minimum_version('1.24')
+ def create_service(
+ self, task_template, name=None, labels=None, mode=None,
+ update_config=None, networks=None, endpoint_config=None
+ ):
+ url = self._url('/services/create')
+ headers = {}
+ image = task_template.get('ContainerSpec', {}).get('Image', None)
+ if image is None:
+ raise errors.DockerException(
+ 'Missing mandatory Image key in ContainerSpec'
+ )
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+ data = {
+ 'Name': name,
+ 'Labels': labels,
+ 'TaskTemplate': task_template,
+ 'Mode': mode,
+ 'UpdateConfig': update_config,
+ 'Networks': networks,
+ 'Endpoint': endpoint_config
+ }
+ return self._result(
+ self._post_json(url, data=data, headers=headers), True
+ )
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource
+ def inspect_service(self, service):
+ url = self._url('/services/{0}', service)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource
+ def inspect_task(self, task):
+ url = self._url('/tasks/{0}', task)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource
+ def remove_service(self, service):
+ url = self._url('/services/{0}', service)
+ resp = self._delete(url)
+ self._raise_for_status(resp)
+ return True
+
+ @utils.minimum_version('1.24')
+ def services(self, filters=None):
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/services')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ def tasks(self, filters=None):
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/tasks')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource
+ def update_service(self, service, version, task_template=None, name=None,
+ labels=None, mode=None, update_config=None,
+ networks=None, endpoint_config=None):
+ url = self._url('/services/{0}/update', service)
+ data = {}
+ headers = {}
+ if name is not None:
+ data['Name'] = name
+ if labels is not None:
+ data['Labels'] = labels
+ if mode is not None:
+ data['Mode'] = mode
+ if task_template is not None:
+ image = task_template.get('ContainerSpec', {}).get('Image', None)
+ if image is not None:
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+ data['TaskTemplate'] = task_template
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+ if networks is not None:
+ data['Networks'] = networks
+ if endpoint_config is not None:
+ data['Endpoint'] = endpoint_config
+
+ resp = self._post_json(
+ url, data=data, params={'version': version}, headers=headers
+ )
+ self._raise_for_status(resp)
+ return True
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
new file mode 100644
index 0000000..d099364
--- /dev/null
+++ b/docker/api/swarm.py
@@ -0,0 +1,78 @@
+from .. import utils
+import logging
+log = logging.getLogger(__name__)
+
+
+class SwarmApiMixin(object):
+
+ def create_swarm_spec(self, *args, **kwargs):
+ return utils.SwarmSpec(*args, **kwargs)
+
+ @utils.minimum_version('1.24')
+ def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, swarm_spec=None):
+ url = self._url('/swarm/init')
+ if swarm_spec is not None and not isinstance(swarm_spec, dict):
+ raise TypeError('swarm_spec must be a dictionary')
+ data = {
+ 'AdvertiseAddr': advertise_addr,
+ 'ListenAddr': listen_addr,
+ 'ForceNewCluster': force_new_cluster,
+ 'Spec': swarm_spec,
+ }
+ response = self._post_json(url, data=data)
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def inspect_swarm(self):
+ url = self._url('/swarm')
+ return self._result(self._get(url), True)
+
+ @utils.check_resource
+ @utils.minimum_version('1.24')
+ def inspect_node(self, node_id):
+ url = self._url('/nodes/{0}', node_id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ def join_swarm(self, remote_addrs, join_token, listen_addr=None,
+ advertise_addr=None):
+ data = {
+ "RemoteAddrs": remote_addrs,
+ "ListenAddr": listen_addr,
+ "JoinToken": join_token,
+ "AdvertiseAddr": advertise_addr,
+ }
+ url = self._url('/swarm/join')
+ response = self._post_json(url, data=data)
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def leave_swarm(self, force=False):
+ url = self._url('/swarm/leave')
+ response = self._post(url, params={'force': force})
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def nodes(self, filters=None):
+ url = self._url('/nodes')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
+ rotate_manager_token=False):
+ url = self._url('/swarm/update')
+ response = self._post_json(url, data=swarm_spec, params={
+ 'rotateWorkerToken': rotate_worker_token,
+ 'rotateManagerToken': rotate_manager_token,
+ 'version': version
+ })
+ self._raise_for_status(response)
+ return True
diff --git a/docker/api/volume.py b/docker/api/volume.py
index bb8b39b..afc72cb 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -1,3 +1,4 @@
+from .. import errors
from .. import utils
@@ -11,7 +12,7 @@ class VolumeApiMixin(object):
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.21')
- def create_volume(self, name, driver=None, driver_opts=None):
+ def create_volume(self, name, driver=None, driver_opts=None, labels=None):
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
@@ -21,6 +22,16 @@ class VolumeApiMixin(object):
'Driver': driver,
'DriverOpts': driver_opts,
}
+
+ if labels is not None:
+ if utils.compare_version('1.23', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'volume labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
return self._result(self._post_json(url, data=data), True)
@utils.minimum_version('1.21')
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index d23e6f3..dc0baea 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -1,22 +1,9 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
import base64
import json
import logging
import os
+import dockerpycreds
import six
from .. import errors
@@ -25,6 +12,7 @@ INDEX_NAME = 'docker.io'
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -51,6 +39,26 @@ def resolve_index_name(index_name):
return index_name
+def get_config_header(client, registry):
+ log.debug('Looking for auth config')
+ if not client._auth_configs:
+ log.debug(
+ "No auth config in memory - loading from filesystem"
+ )
+ client._auth_configs = load_config()
+ authcfg = resolve_authconfig(client._auth_configs, registry)
+ # Do not fail here if no authentication exists for this
+ # specific registry as we can have a readonly pull. Just
+ # put the header if we can.
+ if authcfg:
+ log.debug('Found auth config')
+ # auth_config needs to be a dict in the format used by
+ # auth.py username , password, serveraddress, email
+ return encode_header(authcfg)
+ log.debug('No auth config found')
+ return None
+
+
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
@@ -68,6 +76,13 @@ def resolve_authconfig(authconfig, registry=None):
with full URLs are stripped down to hostnames before checking for a match.
Returns None if no match was found.
"""
+ if 'credsStore' in authconfig:
+ log.debug(
+ 'Using credentials store "{0}"'.format(authconfig['credsStore'])
+ )
+ return _resolve_authconfig_credstore(
+ authconfig, registry, authconfig['credsStore']
+ )
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
@@ -85,6 +100,35 @@ def resolve_authconfig(authconfig, registry=None):
return None
+def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+ # The ecosystem is a little schizophrenic with index.docker.io VS
+ # docker.io - in that case, it seems the full URL is necessary.
+ registry = 'https://index.docker.io/v1/'
+ log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ store = dockerpycreds.Store(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except dockerpycreds.CredentialsNotFound as e:
+ log.debug('No entry found')
+ return None
+ except dockerpycreds.StoreError as e:
+ raise errors.DockerException(
+ 'Credentials store error: {0}'.format(repr(e))
+ )
+
+
def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
@@ -130,6 +174,15 @@ def parse_auth(entries, raise_on_error=False):
'Invalid configuration for registry {0}'.format(registry)
)
return {}
+ if 'identitytoken' in entry:
+ log.debug('Found an IdentityToken entry for registry {0}'.format(
+ registry
+ ))
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token, skip.
+
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
@@ -138,13 +191,15 @@ def parse_auth(entries, raise_on_error=False):
'Auth data for {0} is absent. Client might be using a '
'credentials store instead.'
)
- return {}
+ conf[registry] = {}
+ continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
+
conf[registry] = {
'username': username,
'password': password,
@@ -160,18 +215,24 @@ def find_config_file(config_path=None):
os.path.basename(DOCKER_CONFIG_FILENAME)
) if os.environ.get('DOCKER_CONFIG') else None
- paths = [
+ paths = filter(None, [
config_path, # 1
environment_path, # 2
os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
os.path.join(
os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
) # 4
- ]
+ ])
+
+ log.debug("Trying paths: {0}".format(repr(paths)))
for path in paths:
- if path and os.path.exists(path):
+ if os.path.exists(path):
+ log.debug("Found file at path: {0}".format(path))
return path
+
+ log.debug("No config file found")
+
return None
@@ -186,7 +247,6 @@ def load_config(config_path=None):
config_file = find_config_file(config_path)
if not config_file:
- log.debug("File doesn't exist")
return {}
try:
diff --git a/docker/client.py b/docker/client.py
index 81e9de9..47ad09e 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,19 +1,6 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
import json
import struct
+from functools import partial
import requests
import requests.exceptions
@@ -29,6 +16,7 @@ from .ssladapter import ssladapter
from .tls import TLSConfig
from .transport import UnixAdapter
from .utils import utils, check_resource, update_headers, kwargs_from_env
+from .utils.socket import frames_iter
try:
from .transport import NpipeAdapter
except ImportError:
@@ -46,11 +34,14 @@ class Client(
api.DaemonApiMixin,
api.ExecApiMixin,
api.ImageApiMixin,
- api.VolumeApiMixin,
- api.NetworkApiMixin):
+ api.NetworkApiMixin,
+ api.ServiceApiMixin,
+ api.SwarmApiMixin,
+ api.VolumeApiMixin):
def __init__(self, base_url=None, version=None,
timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=constants.DEFAULT_USER_AGENT):
+ user_agent=constants.DEFAULT_USER_AGENT,
+ num_pools=constants.DEFAULT_NUM_POOLS):
super(Client, self).__init__()
if tls and not base_url:
@@ -68,8 +59,11 @@ class Client(
base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
)
if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(base_url, timeout)
+ self._custom_adapter = UnixAdapter(
+ base_url, timeout, num_pools=num_pools
+ )
self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
self.base_url = 'http+docker://localunixsocket'
elif base_url.startswith('npipe://'):
if not constants.IS_WINDOWS_PLATFORM:
@@ -77,7 +71,9 @@ class Client(
'The npipe:// protocol is only supported on Windows'
)
try:
- self._custom_adapter = NpipeAdapter(base_url, timeout)
+ self._custom_adapter = NpipeAdapter(
+ base_url, timeout, num_pools=num_pools
+ )
except NameError:
raise errors.DockerException(
'Install pypiwin32 package to enable npipe:// support'
@@ -89,7 +85,9 @@ class Client(
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
- self._custom_adapter = ssladapter.SSLAdapter()
+ self._custom_adapter = ssladapter.SSLAdapter(
+ num_pools=num_pools
+ )
self.mount('https://', self._custom_adapter)
self.base_url = base_url
@@ -110,7 +108,8 @@ class Client(
@classmethod
def from_env(cls, **kwargs):
- return cls(**kwargs_from_env(**kwargs))
+ version = kwargs.pop('version', None)
+ return cls(version=version, **kwargs_from_env(**kwargs))
def _retrieve_server_version(self):
try:
@@ -155,7 +154,8 @@ class Client(
'instead'.format(arg, type(arg))
)
- args = map(six.moves.urllib.parse.quote_plus, args)
+ quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
+ args = map(quote_f, args)
if kwargs.get('versioned_api', True):
return '{0}/v{1}{2}'.format(
@@ -250,12 +250,20 @@ class Client(
if decode:
if six.PY3:
data = data.decode('utf-8')
- data = json.loads(data)
- yield data
+ # remove the trailing newline
+ data = data.strip()
+ # split the data at any newlines
+ data_list = data.split("\r\n")
+            # load and yield each line separately
+ for data in data_list:
+ data = json.loads(data)
+ yield data
+ else:
+ yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
- yield self._result(response)
+ yield self._result(response, json=decode)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
@@ -307,6 +315,14 @@ class Client(
for out in response.iter_content(chunk_size=1, decode_unicode=True):
yield out
+ def _read_from_socket(self, response, stream):
+ socket = self._get_raw_response_socket(response)
+
+ if stream:
+ return frames_iter(socket)
+ else:
+ return six.binary_type().join(frames_iter(socket))
+
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
@@ -360,6 +376,10 @@ class Client(
[x for x in self._multiplexed_buffer_helper(res)]
)
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
def get_adapter(self, url):
try:
return super(Client, self).get_adapter(url)
diff --git a/docker/constants.py b/docker/constants.py
index 904d50e..0c9a020 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.22'
+DEFAULT_DOCKER_API_VERSION = '1.24'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
@@ -15,3 +15,4 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
DEFAULT_USER_AGENT = "docker-py/{0}".format(version)
+DEFAULT_NUM_POOLS = 25
diff --git a/docker/errors.py b/docker/errors.py
index e85910c..97be802 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -1,16 +1,3 @@
-# Copyright 2014 dotCloud inc.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
import requests
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index 736ddf6..917fa8b 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -1,6 +1,7 @@
import six
import requests.adapters
+from .. import constants
from .npipesocket import NpipeSocket
if six.PY3:
@@ -33,9 +34,9 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
- def __init__(self, npipe_path, timeout=60):
+ def __init__(self, npipe_path, timeout=60, maxsize=10):
super(NpipeHTTPConnectionPool, self).__init__(
- 'localhost', timeout=timeout
+ 'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
@@ -47,11 +48,12 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class NpipeAdapter(requests.adapters.HTTPAdapter):
- def __init__(self, base_url, timeout=60):
+ def __init__(self, base_url, timeout=60,
+ num_pools=constants.DEFAULT_NUM_POOLS):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.pools = RecentlyUsedContainer(
- 10, dispose_func=lambda p: p.close()
+ num_pools, dispose_func=lambda p: p.close()
)
super(NpipeAdapter, self).__init__()
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index 35418ef..9010ceb 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -94,7 +94,7 @@ class NpipeSocket(object):
if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
- if bufsize is None:
+ if bufsize is None or bufsize < 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index f4d83ef..b7905a0 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -1,20 +1,9 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
import six
import requests.adapters
import socket
+from .. import constants
+
if six.PY3:
import http.client as httplib
else:
@@ -25,6 +14,7 @@ try:
except ImportError:
import urllib3
+
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
@@ -45,28 +35,31 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
- def __init__(self, base_url, socket_path, timeout=60):
+ def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super(UnixHTTPConnectionPool, self).__init__(
- 'localhost', timeout=timeout
+ 'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
- return UnixHTTPConnection(self.base_url, self.socket_path,
- self.timeout)
+ return UnixHTTPConnection(
+ self.base_url, self.socket_path, self.timeout
+ )
class UnixAdapter(requests.adapters.HTTPAdapter):
- def __init__(self, socket_url, timeout=60):
+ def __init__(self, socket_url, timeout=60,
+ num_pools=constants.DEFAULT_NUM_POOLS):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
- self.pools = RecentlyUsedContainer(10,
- dispose_func=lambda p: p.close())
+ self.pools = RecentlyUsedContainer(
+ num_pools, dispose_func=lambda p: p.close()
+ )
super(UnixAdapter, self).__init__()
def get_connection(self, url, proxies=None):
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
new file mode 100644
index 0000000..3609581
--- /dev/null
+++ b/docker/types/__init__.py
@@ -0,0 +1,7 @@
+# flake8: noqa
+from .containers import LogConfig, Ulimit
+from .services import (
+ ContainerSpec, DriverConfig, Mount, Resources, RestartPolicy, TaskTemplate,
+ UpdateConfig
+)
+from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/base.py b/docker/types/base.py
new file mode 100644
index 0000000..6891062
--- /dev/null
+++ b/docker/types/base.py
@@ -0,0 +1,7 @@
+import six
+
+
+class DictType(dict):
+ def __init__(self, init):
+ for k, v in six.iteritems(init):
+ self[k] = v
diff --git a/docker/types/containers.py b/docker/types/containers.py
new file mode 100644
index 0000000..40a44ca
--- /dev/null
+++ b/docker/types/containers.py
@@ -0,0 +1,92 @@
+import six
+
+from .base import DictType
+
+
+class LogConfigTypesEnum(object):
+ _values = (
+ 'json-file',
+ 'syslog',
+ 'journald',
+ 'gelf',
+ 'fluentd',
+ 'none'
+ )
+ JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
+
+
+class LogConfig(DictType):
+ types = LogConfigTypesEnum
+
+ def __init__(self, **kwargs):
+ log_driver_type = kwargs.get('type', kwargs.get('Type'))
+ config = kwargs.get('config', kwargs.get('Config')) or {}
+
+ if config and not isinstance(config, dict):
+ raise ValueError("LogConfig.config must be a dictionary")
+
+ super(LogConfig, self).__init__({
+ 'Type': log_driver_type,
+ 'Config': config
+ })
+
+ @property
+ def type(self):
+ return self['Type']
+
+ @type.setter
+ def type(self, value):
+ self['Type'] = value
+
+ @property
+ def config(self):
+ return self['Config']
+
+ def set_config_value(self, key, value):
+ self.config[key] = value
+
+ def unset_config(self, key):
+ if key in self.config:
+ del self.config[key]
+
+
+class Ulimit(DictType):
+ def __init__(self, **kwargs):
+ name = kwargs.get('name', kwargs.get('Name'))
+ soft = kwargs.get('soft', kwargs.get('Soft'))
+ hard = kwargs.get('hard', kwargs.get('Hard'))
+ if not isinstance(name, six.string_types):
+ raise ValueError("Ulimit.name must be a string")
+ if soft and not isinstance(soft, int):
+ raise ValueError("Ulimit.soft must be an integer")
+ if hard and not isinstance(hard, int):
+ raise ValueError("Ulimit.hard must be an integer")
+ super(Ulimit, self).__init__({
+ 'Name': name,
+ 'Soft': soft,
+ 'Hard': hard
+ })
+
+ @property
+ def name(self):
+ return self['Name']
+
+ @name.setter
+ def name(self, value):
+ self['Name'] = value
+
+ @property
+ def soft(self):
+ return self.get('Soft')
+
+ @soft.setter
+ def soft(self, value):
+ self['Soft'] = value
+
+ @property
+ def hard(self):
+ return self.get('Hard')
+
+ @hard.setter
+ def hard(self, value):
+ self['Hard'] = value
diff --git a/docker/types/services.py b/docker/types/services.py
new file mode 100644
index 0000000..8488d6e
--- /dev/null
+++ b/docker/types/services.py
@@ -0,0 +1,181 @@
+import six
+
+from .. import errors
+
+
+class TaskTemplate(dict):
+ def __init__(self, container_spec, resources=None, restart_policy=None,
+ placement=None, log_driver=None):
+ self['ContainerSpec'] = container_spec
+ if resources:
+ self['Resources'] = resources
+ if restart_policy:
+ self['RestartPolicy'] = restart_policy
+ if placement:
+ self['Placement'] = placement
+ if log_driver:
+ self['LogDriver'] = log_driver
+
+ @property
+ def container_spec(self):
+ return self.get('ContainerSpec')
+
+ @property
+ def resources(self):
+ return self.get('Resources')
+
+ @property
+ def restart_policy(self):
+ return self.get('RestartPolicy')
+
+ @property
+ def placement(self):
+ return self.get('Placement')
+
+
+class ContainerSpec(dict):
+ def __init__(self, image, command=None, args=None, env=None, workdir=None,
+ user=None, labels=None, mounts=None, stop_grace_period=None):
+ from ..utils import split_command # FIXME: circular import
+
+ self['Image'] = image
+
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+ self['Command'] = command
+ self['Args'] = args
+
+ if env is not None:
+ self['Env'] = env
+ if workdir is not None:
+ self['Dir'] = workdir
+ if user is not None:
+ self['User'] = user
+ if labels is not None:
+ self['Labels'] = labels
+ if mounts is not None:
+ for mount in mounts:
+ if isinstance(mount, six.string_types):
+ mounts.append(Mount.parse_mount_string(mount))
+ mounts.remove(mount)
+ self['Mounts'] = mounts
+ if stop_grace_period is not None:
+ self['StopGracePeriod'] = stop_grace_period
+
+
+class Mount(dict):
+ def __init__(self, target, source, type='volume', read_only=False,
+ propagation=None, no_copy=False, labels=None,
+ driver_config=None):
+ self['Target'] = target
+ self['Source'] = source
+ if type not in ('bind', 'volume'):
+ raise errors.DockerError(
+ 'Only acceptable mount types are `bind` and `volume`.'
+ )
+ self['Type'] = type
+
+ if type == 'bind':
+ if propagation is not None:
+ self['BindOptions'] = {
+ 'Propagation': propagation
+ }
+ if any([labels, driver_config, no_copy]):
+ raise errors.DockerError(
+ 'Mount type is binding but volume options have been '
+ 'provided.'
+ )
+ else:
+ volume_opts = {}
+ if no_copy:
+ volume_opts['NoCopy'] = True
+ if labels:
+ volume_opts['Labels'] = labels
+ if driver_config:
+ volume_opts['driver_config'] = driver_config
+ if volume_opts:
+ self['VolumeOptions'] = volume_opts
+ if propagation:
+ raise errors.DockerError(
+ 'Mount type is volume but `propagation` argument has been '
+ 'provided.'
+ )
+
+ @classmethod
+ def parse_mount_string(cls, string):
+ parts = string.split(':')
+ if len(parts) > 3:
+ raise errors.DockerError(
+ 'Invalid mount format "{0}"'.format(string)
+ )
+ if len(parts) == 1:
+ return cls(target=parts[0])
+ else:
+ target = parts[1]
+ source = parts[0]
+ read_only = not (len(parts) == 3 or parts[2] == 'ro')
+ return cls(target, source, read_only=read_only)
+
+
+class Resources(dict):
+ def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
+ mem_reservation=None):
+ limits = {}
+ reservation = {}
+ if cpu_limit is not None:
+ limits['NanoCPUs'] = cpu_limit
+ if mem_limit is not None:
+ limits['MemoryBytes'] = mem_limit
+ if cpu_reservation is not None:
+ reservation['NanoCPUs'] = cpu_reservation
+ if mem_reservation is not None:
+ reservation['MemoryBytes'] = mem_reservation
+
+ if limits:
+ self['Limits'] = limits
+ if reservation:
+ self['Reservations'] = reservation
+
+
+class UpdateConfig(dict):
+ def __init__(self, parallelism=0, delay=None, failure_action='continue'):
+ self['Parallelism'] = parallelism
+ if delay is not None:
+ self['Delay'] = delay
+ if failure_action not in ('pause', 'continue'):
+ raise errors.DockerError(
+ 'failure_action must be either `pause` or `continue`.'
+ )
+ self['FailureAction'] = failure_action
+
+
+class RestartConditionTypesEnum(object):
+ _values = (
+ 'none',
+ 'on_failure',
+ 'any',
+ )
+ NONE, ON_FAILURE, ANY = _values
+
+
+class RestartPolicy(dict):
+ condition_types = RestartConditionTypesEnum
+
+ def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
+ max_attempts=0, window=0):
+ if condition not in self.condition_types._values:
+ raise TypeError(
+ 'Invalid RestartPolicy condition {0}'.format(condition)
+ )
+
+ self['Condition'] = condition
+ self['Delay'] = delay
+ self['MaxAttempts'] = max_attempts
+ self['Window'] = window
+
+
+class DriverConfig(dict):
+ def __init__(self, name, options=None):
+ self['Name'] = name
+ if options:
+ self['Options'] = options
diff --git a/docker/types/swarm.py b/docker/types/swarm.py
new file mode 100644
index 0000000..865fde6
--- /dev/null
+++ b/docker/types/swarm.py
@@ -0,0 +1,40 @@
+class SwarmSpec(dict):
+ def __init__(self, task_history_retention_limit=None,
+ snapshot_interval=None, keep_old_snapshots=None,
+ log_entries_for_slow_followers=None, heartbeat_tick=None,
+ election_tick=None, dispatcher_heartbeat_period=None,
+ node_cert_expiry=None, external_ca=None, name=None):
+ if task_history_retention_limit is not None:
+ self['Orchestration'] = {
+ 'TaskHistoryRetentionLimit': task_history_retention_limit
+ }
+ if any([snapshot_interval, keep_old_snapshots,
+ log_entries_for_slow_followers, heartbeat_tick, election_tick]):
+ self['Raft'] = {
+ 'SnapshotInterval': snapshot_interval,
+ 'KeepOldSnapshots': keep_old_snapshots,
+ 'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
+ 'HeartbeatTick': heartbeat_tick,
+ 'ElectionTick': election_tick
+ }
+
+ if dispatcher_heartbeat_period:
+ self['Dispatcher'] = {
+ 'HeartbeatPeriod': dispatcher_heartbeat_period
+ }
+
+ if node_cert_expiry or external_ca:
+ self['CAConfig'] = {
+ 'NodeCertExpiry': node_cert_expiry,
+ 'ExternalCA': external_ca
+ }
+
+ if name is not None:
+ self['Name'] = name
+
+
+class SwarmExternalCA(dict):
+ def __init__(self, url, protocol=None, options=None):
+ self['URL'] = url
+ self['Protocol'] = protocol
+ self['Options'] = options
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index ccc3819..4bb3876 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,11 +1,13 @@
+# flake8: noqa
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
- kwargs_from_env, convert_filters, datetime_to_timestamp, create_host_config,
- create_container_config, parse_bytes, ping_registry, parse_env_file,
- version_lt, version_gte, decode_json_header, split_command,
+ kwargs_from_env, convert_filters, datetime_to_timestamp,
+ create_host_config, create_container_config, parse_bytes, ping_registry,
+ parse_env_file, version_lt, version_gte, decode_json_header, split_command,
create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
-) # flake8: noqa
+)
-from .types import Ulimit, LogConfig # flake8: noqa
-from .decorators import check_resource, minimum_version, update_headers #flake8: noqa
+from ..types import LogConfig, Ulimit
+from ..types import SwarmExternalCA, SwarmSpec
+from .decorators import check_resource, minimum_version, update_headers
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 7c41a5f..2fe880c 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -13,7 +13,7 @@ def check_resource(f):
elif kwargs.get('image'):
resource_id = kwargs.pop('image')
if isinstance(resource_id, dict):
- resource_id = resource_id.get('Id')
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
if not resource_id:
raise errors.NullResource(
'image or container param is undefined'
@@ -40,7 +40,7 @@ def minimum_version(version):
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._auth_configs:
- if 'headers' not in kwargs:
+ if not kwargs.get('headers'):
kwargs['headers'] = self._auth_configs['HttpHeaders']
else:
kwargs['headers'].update(self._auth_configs['HttpHeaders'])
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
new file mode 100644
index 0000000..ed34350
--- /dev/null
+++ b/docker/utils/socket.py
@@ -0,0 +1,68 @@
+import errno
+import os
+import select
+import struct
+
+import six
+
+
+class SocketError(Exception):
+ pass
+
+
+def read(socket, n=4096):
+ """
+ Reads at most n bytes from socket
+ """
+ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
+
+ # wait for data to become available
+ select.select([socket], [], [])
+
+ try:
+ if hasattr(socket, 'recv'):
+ return socket.recv(n)
+ return os.read(socket.fileno(), n)
+ except EnvironmentError as e:
+ if e.errno not in recoverable_errors:
+ raise
+
+
+def read_exactly(socket, n):
+ """
+ Reads exactly n bytes from socket
+ Raises SocketError if there isn't enough data
+ """
+ data = six.binary_type()
+ while len(data) < n:
+ next_data = read(socket, n - len(data))
+ if not next_data:
+ raise SocketError("Unexpected EOF")
+ data += next_data
+ return data
+
+
+def next_frame_size(socket):
+ """
+ Returns the size of the next frame of data waiting to be read from socket,
+ according to the protocol defined here:
+
+ https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
+ """
+ try:
+ data = read_exactly(socket, 8)
+ except SocketError:
+ return 0
+
+ _, actual = struct.unpack('>BxxxL', data)
+ return actual
+
+
+def frames_iter(socket):
+ """
+ Returns a generator of frames read from socket
+ """
+ n = next_frame_size(socket)
+ while n > 0:
+ yield read(socket, n)
+ n = next_frame_size(socket)
diff --git a/docker/utils/types.py b/docker/utils/types.py
index ea9f06d..8098c47 100644
--- a/docker/utils/types.py
+++ b/docker/utils/types.py
@@ -1,96 +1,7 @@
-import six
+# Compatibility module. See https://github.com/docker/docker-py/issues/1196
+import warnings
-class LogConfigTypesEnum(object):
- _values = (
- 'json-file',
- 'syslog',
- 'journald',
- 'gelf',
- 'fluentd',
- 'none'
- )
- JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
+from ..types import Ulimit, LogConfig # flake8: noqa
-
-class DictType(dict):
- def __init__(self, init):
- for k, v in six.iteritems(init):
- self[k] = v
-
-
-class LogConfig(DictType):
- types = LogConfigTypesEnum
-
- def __init__(self, **kwargs):
- log_driver_type = kwargs.get('type', kwargs.get('Type'))
- config = kwargs.get('config', kwargs.get('Config')) or {}
-
- if config and not isinstance(config, dict):
- raise ValueError("LogConfig.config must be a dictionary")
-
- super(LogConfig, self).__init__({
- 'Type': log_driver_type,
- 'Config': config
- })
-
- @property
- def type(self):
- return self['Type']
-
- @type.setter
- def type(self, value):
- self['Type'] = value
-
- @property
- def config(self):
- return self['Config']
-
- def set_config_value(self, key, value):
- self.config[key] = value
-
- def unset_config(self, key):
- if key in self.config:
- del self.config[key]
-
-
-class Ulimit(DictType):
- def __init__(self, **kwargs):
- name = kwargs.get('name', kwargs.get('Name'))
- soft = kwargs.get('soft', kwargs.get('Soft'))
- hard = kwargs.get('hard', kwargs.get('Hard'))
- if not isinstance(name, six.string_types):
- raise ValueError("Ulimit.name must be a string")
- if soft and not isinstance(soft, int):
- raise ValueError("Ulimit.soft must be an integer")
- if hard and not isinstance(hard, int):
- raise ValueError("Ulimit.hard must be an integer")
- super(Ulimit, self).__init__({
- 'Name': name,
- 'Soft': soft,
- 'Hard': hard
- })
-
- @property
- def name(self):
- return self['Name']
-
- @name.setter
- def name(self, value):
- self['Name'] = value
-
- @property
- def soft(self):
- return self.get('Soft')
-
- @soft.setter
- def soft(self, value):
- self['Soft'] = value
-
- @property
- def hard(self):
- return self.get('Hard')
-
- @hard.setter
- def hard(self, value):
- self['Hard'] = value
+warnings.warn('docker.utils.types is now docker.types', ImportWarning)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 2ef8ef0..d46f8fc 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,17 +1,3 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
import base64
import io
import os
@@ -22,8 +8,8 @@ import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
-from fnmatch import fnmatch
from datetime import datetime
+from fnmatch import fnmatch
import requests
import six
@@ -31,11 +17,17 @@ import six
from .. import constants
from .. import errors
from .. import tls
-from .types import Ulimit, LogConfig
+from ..types import Ulimit, LogConfig
+if six.PY2:
+ from urllib import splitnport
+else:
+ from urllib.parse import splitnport
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
BYTE_UNITS = {
'b': 1,
'k': 1024,
@@ -385,12 +377,11 @@ def parse_repository_tag(repo_name):
# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
proto = "http+unix"
- host = DEFAULT_HTTP_HOST
port = None
path = ''
if not addr and is_win32:
- addr = '{0}:{1}'.format(DEFAULT_HTTP_HOST, 2375)
+ addr = DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
@@ -425,32 +416,27 @@ def parse_host(addr, is_win32=False, tls=False):
)
proto = "https" if tls else "http"
- if proto != "http+unix" and ":" in addr:
- host_parts = addr.split(':')
- if len(host_parts) != 2:
- raise errors.DockerException(
- "Invalid bind address format: {0}".format(addr)
- )
- if host_parts[0]:
- host = host_parts[0]
+ if proto in ("http", "https"):
+ address_parts = addr.split('/', 1)
+ host = address_parts[0]
+ if len(address_parts) == 2:
+ path = '/' + address_parts[1]
+ host, port = splitnport(host)
- port = host_parts[1]
- if '/' in port:
- port, path = port.split('/', 1)
- path = '/{0}'.format(path)
- try:
- port = int(port)
- except Exception:
+ if port is None:
raise errors.DockerException(
"Invalid port: {0}".format(addr)
)
- elif proto in ("http", "https") and ':' not in addr:
- raise errors.DockerException(
- "Bind address needs a port: {0}".format(addr))
+ if not host:
+ host = DEFAULT_HTTP_HOST
else:
host = addr
+ if proto in ("http", "https") and port == -1:
+ raise errors.DockerException(
+ "Bind address needs a port: {0}".format(addr))
+
if proto == "http+unix" or proto == 'npipe':
return "{0}://{1}".format(proto, host)
return "{0}://{1}:{2}{3}".format(proto, host, port, path)
@@ -613,14 +599,17 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
- mem_limit=None, memswap_limit=None, mem_swappiness=None,
- cgroup_parent=None, group_add=None, cpu_quota=None,
+ mem_limit=None, memswap_limit=None,
+ mem_reservation=None, kernel_memory=None,
+ mem_swappiness=None, cgroup_parent=None,
+ group_add=None, cpu_quota=None,
cpu_period=None, blkio_weight=None,
blkio_weight_device=None, device_read_bps=None,
device_write_bps=None, device_read_iops=None,
device_write_iops=None, oom_kill_disable=False,
- shm_size=None, version=None, tmpfs=None,
- oom_score_adj=None):
+ shm_size=None, sysctls=None, version=None, tmpfs=None,
+ oom_score_adj=None, dns_opt=None, cpu_shares=None,
+ cpuset_cpus=None, userns_mode=None, pids_limit=None):
host_config = {}
@@ -637,6 +626,18 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
if memswap_limit is not None:
host_config['MemorySwap'] = parse_bytes(memswap_limit)
+ if mem_reservation:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('mem_reservation', '1.21')
+
+ host_config['MemoryReservation'] = parse_bytes(mem_reservation)
+
+ if kernel_memory:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('kernel_memory', '1.21')
+
+ host_config['KernelMemory'] = parse_bytes(kernel_memory)
+
if mem_swappiness is not None:
if version_lt(version, '1.20'):
raise host_config_version_error('mem_swappiness', '1.20')
@@ -719,12 +720,25 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
if dns is not None:
host_config['Dns'] = dns
+ if dns_opt is not None:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('dns_opt', '1.21')
+
+ host_config['DnsOptions'] = dns_opt
+
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error('security_opt', security_opt, 'list')
host_config['SecurityOpt'] = security_opt
+ if sysctls:
+ if not isinstance(sysctls, dict):
+ raise host_config_type_error('sysctls', sysctls, 'dict')
+ host_config['Sysctls'] = {}
+ for k, v in six.iteritems(sysctls):
+ host_config['Sysctls'][k] = six.text_type(v)
+
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
@@ -796,6 +810,21 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
host_config['CpuPeriod'] = cpu_period
+ if cpu_shares:
+ if version_lt(version, '1.18'):
+ raise host_config_version_error('cpu_shares', '1.18')
+
+ if not isinstance(cpu_shares, int):
+ raise host_config_type_error('cpu_shares', cpu_shares, 'int')
+
+ host_config['CpuShares'] = cpu_shares
+
+ if cpuset_cpus:
+ if version_lt(version, '1.18'):
+ raise host_config_version_error('cpuset_cpus', '1.18')
+
+ host_config['CpuSetCpus'] = cpuset_cpus
+
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error('blkio_weight', blkio_weight, 'int')
@@ -853,6 +882,21 @@ def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
raise host_config_version_error('tmpfs', '1.22')
host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
+ if userns_mode:
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('userns_mode', '1.23')
+
+ if userns_mode != "host":
+ raise host_config_value_error("userns_mode", userns_mode)
+ host_config['UsernsMode'] = userns_mode
+
+ if pids_limit:
+ if not isinstance(pids_limit, int):
+ raise host_config_type_error('pids_limit', pids_limit, 'int')
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('pids_limit', '1.23')
+ host_config["PidsLimit"] = pids_limit
+
return host_config
@@ -873,7 +917,8 @@ def create_networking_config(endpoints_config=None):
def create_endpoint_config(version, aliases=None, links=None,
- ipv4_address=None, ipv6_address=None):
+ ipv4_address=None, ipv6_address=None,
+ link_local_ips=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
@@ -893,6 +938,13 @@ def create_endpoint_config(version, aliases=None, links=None,
if ipv6_address:
ipam_config['IPv6Address'] = ipv6_address
+ if link_local_ips is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'link_local_ips is not supported for API version < 1.24'
+ )
+ ipam_config['LinkLocalIPs'] = link_local_ips
+
if ipam_config:
endpoint_config['IPAMConfig'] = ipam_config
@@ -934,7 +986,7 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
- return '{key}={value}'.format(key=key, value=value)
+ return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in six.iteritems(environment)]
@@ -960,6 +1012,14 @@ def create_container_config(
'labels were only introduced in API version 1.18'
)
+ if cpuset is not None or cpu_shares is not None:
+ if version_gte(version, '1.18'):
+ warnings.warn(
+ 'The cpuset_cpus and cpu_shares options have been moved to '
+ 'host_config in API version 1.18, and will be removed',
+ DeprecationWarning
+ )
+
if stop_signal is not None and compare_version('1.21', version) < 0:
raise errors.InvalidVersion(
'stop_signal was only introduced in API version 1.21'
@@ -989,6 +1049,7 @@ def create_container_config(
if mem_limit is not None:
mem_limit = parse_bytes(mem_limit)
+
if memswap_limit is not None:
memswap_limit = parse_bytes(memswap_limit)
diff --git a/docker/version.py b/docker/version.py
index 95405c7..2bf8436 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.9.0"
+version = "1.10.3"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/docker_py.egg-info/PKG-INFO b/docker_py.egg-info/PKG-INFO
index 79c1598..e468347 100644
--- a/docker_py.egg-info/PKG-INFO
+++ b/docker_py.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: docker-py
-Version: 1.9.0
+Version: 1.10.3
Summary: Python client for Docker.
Home-page: https://github.com/docker/docker-py/
Author: UNKNOWN
diff --git a/docker_py.egg-info/SOURCES.txt b/docker_py.egg-info/SOURCES.txt
index 69cc77b..10cd950 100644
--- a/docker_py.egg-info/SOURCES.txt
+++ b/docker_py.egg-info/SOURCES.txt
@@ -19,6 +19,8 @@ docker/api/daemon.py
docker/api/exec_api.py
docker/api/image.py
docker/api/network.py
+docker/api/service.py
+docker/api/swarm.py
docker/api/volume.py
docker/auth/__init__.py
docker/auth/auth.py
@@ -28,8 +30,14 @@ docker/transport/__init__.py
docker/transport/npipeconn.py
docker/transport/npipesocket.py
docker/transport/unixconn.py
+docker/types/__init__.py
+docker/types/base.py
+docker/types/containers.py
+docker/types/services.py
+docker/types/swarm.py
docker/utils/__init__.py
docker/utils/decorators.py
+docker/utils/socket.py
docker/utils/types.py
docker/utils/utils.py
docker/utils/ports/__init__.py
@@ -52,6 +60,8 @@ tests/integration/exec_test.py
tests/integration/image_test.py
tests/integration/network_test.py
tests/integration/regression_test.py
+tests/integration/service_test.py
+tests/integration/swarm_test.py
tests/integration/volume_test.py
tests/unit/__init__.py
tests/unit/api_test.py
diff --git a/docker_py.egg-info/requires.txt b/docker_py.egg-info/requires.txt
index 111527c..551a0eb 100644
--- a/docker_py.egg-info/requires.txt
+++ b/docker_py.egg-info/requires.txt
@@ -1,6 +1,7 @@
-requests >= 2.5.2
+requests >= 2.5.2, < 2.11
six >= 1.4.0
websocket-client >= 0.32.0
+docker-pycreds >= 0.2.1
[:python_version < "3.3"]
ipaddress >= 1.0.16
diff --git a/requirements.txt b/requirements.txt
index a79b7bf..1e52846 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,5 @@ requests==2.5.3
six>=1.4.0
websocket-client==0.32.0
backports.ssl_match_hostname>=3.5 ; python_version < '3.5'
-ipaddress==1.0.16 ; python_version < '3.3' \ No newline at end of file
+ipaddress==1.0.16 ; python_version < '3.3'
+docker-pycreds==0.2.1 \ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index 6f08d0e..19cf102 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,9 @@
[bdist_wheel]
universal = 1
+[metadata]
+description_file = README.rst
+
[egg_info]
tag_build =
tag_date = 0
diff --git a/setup.py b/setup.py
index ac58b1f..9233ac2 100644
--- a/setup.py
+++ b/setup.py
@@ -9,9 +9,10 @@ ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2',
+ 'requests >= 2.5.2, < 2.11',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
+ 'docker-pycreds >= 0.2.1'
]
if sys.platform == 'win32':
@@ -36,7 +37,8 @@ setup(
url='https://github.com/docker/docker-py/',
packages=[
'docker', 'docker.api', 'docker.auth', 'docker.transport',
- 'docker.utils', 'docker.utils.ports', 'docker.ssladapter'
+ 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
+ 'docker.types',
],
install_requires=requirements,
tests_require=test_requirements,
diff --git a/tests/helpers.py b/tests/helpers.py
index 21036ac..40baef9 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,9 +1,6 @@
-import errno
import os
import os.path
-import select
import shutil
-import struct
import tarfile
import tempfile
import unittest
@@ -48,15 +45,6 @@ def untar_file(tardata, filename):
return result
-def exec_driver_is_native():
- global EXEC_DRIVER
- if not EXEC_DRIVER:
- c = docker_client()
- EXEC_DRIVER = c.info()['ExecutionDriver']
- c.close()
- return EXEC_DRIVER.startswith('native')
-
-
def docker_client(**kwargs):
return docker.Client(**docker_client_kwargs(**kwargs))
@@ -67,49 +55,6 @@ def docker_client_kwargs(**kwargs):
return client_kwargs
-def read_socket(socket, n=4096):
- """ Code stolen from dockerpty to read the socket """
- recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
-
- # wait for data to become available
- select.select([socket], [], [])
-
- try:
- if hasattr(socket, 'recv'):
- return socket.recv(n)
- return os.read(socket.fileno(), n)
- except EnvironmentError as e:
- if e.errno not in recoverable_errors:
- raise
-
-
-def next_packet_size(socket):
- """ Code stolen from dockerpty to get the next packet size """
- data = six.binary_type()
- while len(data) < 8:
- next_data = read_socket(socket, 8 - len(data))
- if not next_data:
- return 0
- data = data + next_data
-
- if data is None:
- return 0
-
- if len(data) == 8:
- _, actual = struct.unpack('>BxxxL', data)
- return actual
-
-
-def read_data(socket, packet_size):
- data = six.binary_type()
- while len(data) < packet_size:
- next_data = read_socket(socket, packet_size - len(data))
- if not next_data:
- assert False, "Failed trying to read in the dataz"
- data += next_data
- return data
-
-
class BaseTestCase(unittest.TestCase):
tmp_imgs = []
tmp_containers = []
diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py
index 56b648a..27d3046 100644
--- a/tests/integration/container_test.py
+++ b/tests/integration/container_test.py
@@ -3,6 +3,8 @@ import signal
import tempfile
import docker
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
import pytest
import six
@@ -157,9 +159,6 @@ class CreateContainerTest(helpers.BaseTestCase):
self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
def create_container_readonly_fs(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
ctnr = self.client.create_container(
BUSYBOX, ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
@@ -290,7 +289,7 @@ class CreateContainerTest(helpers.BaseTestCase):
)
self.client.start(container)
- assert expected_msg in str(excinfo.value)
+ assert six.b(expected_msg) in excinfo.value.explanation
def test_valid_no_log_driver_specified(self):
log_config = docker.utils.LogConfig(
@@ -804,8 +803,7 @@ class KillTest(helpers.BaseTestCase):
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
- if helpers.exec_driver_is_native():
- self.assertNotEqual(state['ExitCode'], 0)
+ self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
@@ -819,8 +817,7 @@ class KillTest(helpers.BaseTestCase):
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
- if helpers.exec_driver_is_native():
- self.assertNotEqual(state['ExitCode'], 0)
+ self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
@@ -1025,9 +1022,9 @@ class AttachContainerTest(helpers.BaseTestCase):
self.client.start(ident)
- next_size = helpers.next_packet_size(pty_stdout)
+ next_size = next_frame_size(pty_stdout)
self.assertEqual(next_size, len(line))
- data = helpers.read_data(pty_stdout, next_size)
+ data = read_exactly(pty_stdout, next_size)
self.assertEqual(data.decode('utf-8'), line)
@@ -1099,11 +1096,38 @@ class ContainerUpdateTest(helpers.BaseTestCase):
container = self.client.create_container(
BUSYBOX, 'top', host_config=self.client.create_host_config(
mem_limit=old_mem_limit
- ), cpu_shares=102
+ )
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.update_container(container, mem_limit=new_mem_limit)
inspect_data = self.client.inspect_container(container)
self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)
- self.assertEqual(inspect_data['HostConfig']['CpuShares'], 102)
+
+
+class ContainerCPUTest(helpers.BaseTestCase):
+ @requires_api_version('1.18')
+ def test_container_cpu_shares(self):
+ cpu_shares = 512
+ container = self.client.create_container(
+ BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ cpu_shares=cpu_shares
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ inspect_data = self.client.inspect_container(container)
+ self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512)
+
+ @requires_api_version('1.18')
+ def test_container_cpuset(self):
+ cpuset_cpus = "0,1"
+ container = self.client.create_container(
+ BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ cpuset_cpus=cpuset_cpus
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ inspect_data = self.client.inspect_container(container)
+ self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)
diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py
index 9f54808..f377e09 100644
--- a/tests/integration/exec_test.py
+++ b/tests/integration/exec_test.py
@@ -1,4 +1,5 @@
-import pytest
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
from .. import helpers
@@ -7,9 +8,6 @@ BUSYBOX = helpers.BUSYBOX
class ExecTest(helpers.BaseTestCase):
def test_execute_command(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
@@ -23,9 +21,6 @@ class ExecTest(helpers.BaseTestCase):
self.assertEqual(exec_log, b'hello\n')
def test_exec_command_string(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
@@ -39,9 +34,6 @@ class ExecTest(helpers.BaseTestCase):
self.assertEqual(exec_log, b'hello world\n')
def test_exec_command_as_user(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
@@ -55,9 +47,6 @@ class ExecTest(helpers.BaseTestCase):
self.assertEqual(exec_log, b'default\n')
def test_exec_command_as_root(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
@@ -71,9 +60,6 @@ class ExecTest(helpers.BaseTestCase):
self.assertEqual(exec_log, b'root\n')
def test_exec_command_streaming(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
@@ -89,9 +75,6 @@ class ExecTest(helpers.BaseTestCase):
self.assertEqual(res, b'hello\nworld\n')
def test_exec_start_socket(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
@@ -107,15 +90,12 @@ class ExecTest(helpers.BaseTestCase):
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
- next_size = helpers.next_packet_size(socket)
+ next_size = next_frame_size(socket)
self.assertEqual(next_size, len(line))
- data = helpers.read_data(socket, next_size)
+ data = read_exactly(socket, next_size)
self.assertEqual(data.decode('utf-8'), line)
def test_exec_inspect(self):
- if not helpers.exec_driver_is_native():
- pytest.skip('Exec driver not native')
-
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
diff --git a/tests/integration/image_test.py b/tests/integration/image_test.py
index 9f38366..a61b58a 100644
--- a/tests/integration/image_test.py
+++ b/tests/integration/image_test.py
@@ -208,6 +208,48 @@ class ImportImageTest(helpers.BaseTestCase):
img_id = result['status']
self.tmp_imgs.append(img_id)
+ def test_import_image_from_data_with_changes(self):
+ with self.dummy_tar_stream(n_bytes=500) as f:
+ content = f.read()
+
+ statuses = self.client.import_image_from_data(
+ content, repository='test/import-from-bytes',
+ changes=['USER foobar', 'CMD ["echo"]']
+ )
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ img_data = self.client.inspect_image(img_id)
+ assert img_data is not None
+ assert img_data['Config']['Cmd'] == ['echo']
+ assert img_data['Config']['User'] == 'foobar'
+
+ def test_import_image_with_changes(self):
+ with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
+ statuses = self.client.import_image(
+ src=tar_filename, repository='test/import-from-file',
+ changes=['USER foobar', 'CMD ["echo"]']
+ )
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ img_data = self.client.inspect_image(img_id)
+ assert img_data is not None
+ assert img_data['Config']['Cmd'] == ['echo']
+ assert img_data['Config']['User'] == 'foobar'
+
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py
index f719fea..6726db4 100644
--- a/tests/integration/network_test.py
+++ b/tests/integration/network_test.py
@@ -115,7 +115,8 @@ class TestNetworks(helpers.BaseTestCase):
network_data = self.client.inspect_network(net_id)
self.assertEqual(
list(network_data['Containers'].keys()),
- [container['Id']])
+ [container['Id']]
+ )
with pytest.raises(docker.errors.APIError):
self.client.connect_container_to_network(container, net_id)
@@ -128,6 +129,33 @@ class TestNetworks(helpers.BaseTestCase):
self.client.disconnect_container_from_network(container, net_id)
@requires_api_version('1.22')
+ def test_connect_and_force_disconnect_container(self):
+ net_name, net_id = self.create_network()
+
+ container = self.client.create_container('busybox', 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ network_data = self.client.inspect_network(net_id)
+ self.assertFalse(network_data.get('Containers'))
+
+ self.client.connect_container_to_network(container, net_id)
+ network_data = self.client.inspect_network(net_id)
+ self.assertEqual(
+ list(network_data['Containers'].keys()),
+ [container['Id']]
+ )
+
+ self.client.disconnect_container_from_network(container, net_id, True)
+ network_data = self.client.inspect_network(net_id)
+ self.assertFalse(network_data.get('Containers'))
+
+ with pytest.raises(docker.errors.APIError):
+ self.client.disconnect_container_from_network(
+ container, net_id, force=True
+ )
+
+ @requires_api_version('1.22')
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
@@ -249,6 +277,27 @@ class TestNetworks(helpers.BaseTestCase):
'2001:389::f00d'
)
+ @requires_api_version('1.24')
+ def test_create_with_linklocal_ips(self):
+ container = self.client.create_container(
+ 'busybox', 'top',
+ networking_config=self.client.create_networking_config(
+ {
+ 'bridge': self.client.create_endpoint_config(
+ link_local_ips=['169.254.8.8']
+ )
+ }
+ ),
+ host_config=self.client.create_host_config(network_mode='bridge')
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ container_data = self.client.inspect_container(container)
+ net_cfg = container_data['NetworkSettings']['Networks']['bridge']
+ assert 'IPAMConfig' in net_cfg
+ assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
+ assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
+
@requires_api_version('1.22')
def test_create_with_links(self):
net_name, net_id = self.create_network()
@@ -279,7 +328,8 @@ class TestNetworks(helpers.BaseTestCase):
net_name, net_id = self.create_network()
with self.assertRaises(docker.errors.APIError):
self.client.create_network(net_name, check_duplicate=True)
- self.client.create_network(net_name, check_duplicate=False)
+ net_id = self.client.create_network(net_name, check_duplicate=False)
+ self.tmp_networks.append(net_id['Id'])
@requires_api_version('1.22')
def test_connect_with_links(self):
@@ -366,3 +416,27 @@ class TestNetworks(helpers.BaseTestCase):
_, net_id = self.create_network(internal=True)
net = self.client.inspect_network(net_id)
assert net['Internal'] is True
+
+ @requires_api_version('1.23')
+ def test_create_network_with_labels(self):
+ _, net_id = self.create_network(labels={
+ 'com.docker.py.test': 'label'
+ })
+
+ net = self.client.inspect_network(net_id)
+ assert 'Labels' in net
+ assert len(net['Labels']) == 1
+ assert net['Labels'] == {
+ 'com.docker.py.test': 'label'
+ }
+
+ @requires_api_version('1.23')
+ def test_create_network_with_labels_wrong_type(self):
+ with pytest.raises(TypeError):
+ self.create_network(labels=['com.docker.py.test=label', ])
+
+ @requires_api_version('1.23')
+ def test_create_network_ipv6_enabled(self):
+ _, net_id = self.create_network(enable_ipv6=True)
+ net = self.client.inspect_network(net_id)
+ assert net['EnableIPv6'] is True
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
new file mode 100644
index 0000000..2b99316
--- /dev/null
+++ b/tests/integration/service_test.py
@@ -0,0 +1,189 @@
+import random
+
+import docker
+
+from ..base import requires_api_version
+from .. import helpers
+
+
+BUSYBOX = helpers.BUSYBOX
+
+
+class ServiceTest(helpers.BaseTestCase):
+ def setUp(self):
+ super(ServiceTest, self).setUp()
+ try:
+ self.client.leave_swarm(force=True)
+ except docker.errors.APIError:
+ pass
+ self.client.init_swarm('eth0')
+
+ def tearDown(self):
+ super(ServiceTest, self).tearDown()
+ for service in self.client.services(filters={'name': 'dockerpytest_'}):
+ try:
+ self.client.remove_service(service['ID'])
+ except docker.errors.APIError:
+ pass
+ try:
+ self.client.leave_swarm(force=True)
+ except docker.errors.APIError:
+ pass
+
+ def get_service_name(self):
+ return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+ def create_simple_service(self, name=None):
+ if name:
+ name = 'dockerpytest_{0}'.format(name)
+ else:
+ name = self.get_service_name()
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ return name, self.client.create_service(task_tmpl, name=name)
+
+ @requires_api_version('1.24')
+ def test_list_services(self):
+ services = self.client.services()
+ assert isinstance(services, list)
+
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+ self.create_simple_service()
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 1
+ assert 'dockerpytest_' in test_services[0]['Spec']['Name']
+
+ def test_inspect_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ def test_inspect_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_name)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ def test_remove_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_id)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_remove_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_name)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_create_service_simple(self):
+ name, svc_id = self.create_simple_service()
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+
+ def test_create_service_custom_log_driver(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ log_cfg = docker.types.DriverConfig('none')
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, log_driver=log_cfg
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'LogDriver' in res_template
+ assert 'Name' in res_template['LogDriver']
+ assert res_template['LogDriver']['Name'] == 'none'
+
+ def test_create_service_with_volume_mount(self):
+ vol_name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['ls'],
+ mounts=[
+ docker.types.Mount(target='/test', source=vol_name)
+ ]
+ )
+ self.tmp_volumes.append(vol_name)
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Mounts' in cspec
+ assert len(cspec['Mounts']) == 1
+ mount = cspec['Mounts'][0]
+ assert mount['Target'] == '/test'
+ assert mount['Source'] == vol_name
+ assert mount['Type'] == 'volume'
+
+ def test_create_service_with_resources_constraints(self):
+ container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ resources = docker.types.Resources(
+ cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
+ cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ assert res_template['Resources']['Limits'] == resources['Limits']
+ assert res_template['Resources']['Reservations'] == resources[
+ 'Reservations'
+ ]
+
+ def test_create_service_with_update_config(self):
+ container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ assert update_config == svc_info['Spec']['UpdateConfig']
+
+ def test_create_service_with_restart_policy(self):
+ container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ policy = docker.types.RestartPolicy(
+ docker.types.RestartPolicy.condition_types.ANY,
+ delay=5, max_attempts=5
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, restart_policy=policy
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
+ assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
+
+ def test_update_service_name(self):
+ name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ svc_version = svc_info['Version']['Index']
+ new_name = self.get_service_name()
+ assert self.client.update_service(
+ svc_id, svc_version, name=new_name,
+ task_template=svc_info['Spec']['TaskTemplate']
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert svc_info['Spec']['Name'] == new_name
diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py
new file mode 100644
index 0000000..128628e
--- /dev/null
+++ b/tests/integration/swarm_test.py
@@ -0,0 +1,145 @@
+import docker
+import pytest
+
+from ..base import requires_api_version
+from .. import helpers
+
+
+BUSYBOX = helpers.BUSYBOX
+
+
+class SwarmTest(helpers.BaseTestCase):
+ def setUp(self):
+ super(SwarmTest, self).setUp()
+ try:
+ self.client.leave_swarm(force=True)
+ except docker.errors.APIError:
+ pass
+
+ def tearDown(self):
+ super(SwarmTest, self).tearDown()
+ try:
+ self.client.leave_swarm(force=True)
+ except docker.errors.APIError:
+ pass
+
+ @requires_api_version('1.24')
+ def test_init_swarm_simple(self):
+ assert self.client.init_swarm('eth0')
+
+ @requires_api_version('1.24')
+ def test_init_swarm_force_new_cluster(self):
+ pytest.skip('Test stalls the engine on 1.12.0')
+
+ assert self.client.init_swarm('eth0')
+ version_1 = self.client.inspect_swarm()['Version']['Index']
+ assert self.client.init_swarm('eth0', force_new_cluster=True)
+ version_2 = self.client.inspect_swarm()['Version']['Index']
+ assert version_2 != version_1
+
+ @requires_api_version('1.24')
+ def test_init_already_in_cluster(self):
+ assert self.client.init_swarm('eth0')
+ with pytest.raises(docker.errors.APIError):
+ self.client.init_swarm('eth0')
+
+ @requires_api_version('1.24')
+ def test_init_swarm_custom_raft_spec(self):
+ spec = self.client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ assert self.client.init_swarm(
+ advertise_addr='eth0', swarm_spec=spec
+ )
+ swarm_info = self.client.inspect_swarm()
+ assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
+ assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+
+ @requires_api_version('1.24')
+ def test_leave_swarm(self):
+ assert self.client.init_swarm('eth0')
+ with pytest.raises(docker.errors.APIError) as exc_info:
+ self.client.leave_swarm()
+ exc_info.value.response.status_code == 500
+ assert self.client.leave_swarm(force=True)
+ with pytest.raises(docker.errors.APIError) as exc_info:
+ self.client.inspect_swarm()
+ exc_info.value.response.status_code == 406
+
+ @requires_api_version('1.24')
+ def test_update_swarm(self):
+ assert self.client.init_swarm('eth0')
+ swarm_info_1 = self.client.inspect_swarm()
+ spec = self.client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200,
+ node_cert_expiry=7776000000000000
+ )
+ assert self.client.update_swarm(
+ version=swarm_info_1['Version']['Index'],
+ swarm_spec=spec, rotate_worker_token=True
+ )
+ swarm_info_2 = self.client.inspect_swarm()
+
+ assert (
+ swarm_info_1['Version']['Index'] !=
+ swarm_info_2['Version']['Index']
+ )
+ assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
+ assert (
+ swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+ )
+ assert (
+ swarm_info_1['JoinTokens']['Manager'] ==
+ swarm_info_2['JoinTokens']['Manager']
+ )
+ assert (
+ swarm_info_1['JoinTokens']['Worker'] !=
+ swarm_info_2['JoinTokens']['Worker']
+ )
+
+ @requires_api_version('1.24')
+ def test_update_swarm_name(self):
+ assert self.client.init_swarm('eth0')
+ swarm_info_1 = self.client.inspect_swarm()
+ spec = self.client.create_swarm_spec(
+ node_cert_expiry=7776000000000000, name='reimuhakurei'
+ )
+ assert self.client.update_swarm(
+ version=swarm_info_1['Version']['Index'], swarm_spec=spec
+ )
+ swarm_info_2 = self.client.inspect_swarm()
+
+ assert (
+ swarm_info_1['Version']['Index'] !=
+ swarm_info_2['Version']['Index']
+ )
+ assert swarm_info_2['Spec']['Name'] == 'reimuhakurei'
+
+ @requires_api_version('1.24')
+ def test_list_nodes(self):
+ assert self.client.init_swarm('eth0')
+ nodes_list = self.client.nodes()
+ assert len(nodes_list) == 1
+ node = nodes_list[0]
+ assert 'ID' in node
+ assert 'Spec' in node
+ assert node['Spec']['Role'] == 'manager'
+
+ filtered_list = self.client.nodes(filters={
+ 'id': node['ID']
+ })
+ assert len(filtered_list) == 1
+ filtered_list = self.client.nodes(filters={
+ 'role': 'worker'
+ })
+ assert len(filtered_list) == 0
+
+ @requires_api_version('1.24')
+ def test_inspect_node(self):
+ assert self.client.init_swarm('eth0')
+ nodes_list = self.client.nodes()
+ assert len(nodes_list) == 1
+ node = nodes_list[0]
+ node_data = self.client.inspect_node(node['ID'])
+ assert node['ID'] == node_data['ID']
+ assert node['Version'] == node_data['Version']
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index bfe196c..389b5f5 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -1,17 +1,3 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
import datetime
import json
import os
@@ -22,9 +8,11 @@ import sys
import tempfile
import threading
import time
+import io
import docker
import requests
+from requests.packages import urllib3
import six
from .. import base
@@ -42,7 +30,7 @@ DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
- request=None):
+ request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
@@ -52,6 +40,7 @@ def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
+ res.raw = raw
return res
@@ -93,6 +82,10 @@ def fake_put(self, url, *args, **kwargs):
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
+
+def fake_read_from_socket(self, response, stream):
+ return six.binary_type()
+
url_base = 'http+docker://localunixsocket/'
url_prefix = '{0}v{1}/'.format(
url_base,
@@ -103,7 +96,8 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.Client', get=fake_get, post=fake_post, put=fake_put,
- delete=fake_delete
+ delete=fake_delete,
+ _read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = docker.Client()
@@ -154,9 +148,15 @@ class DockerApiTest(DockerClientTest):
'{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
)
- url = self.client._url('/hello/{0}/world', '/some?name')
+ url = self.client._url('/hello/{0}/world', 'some?name')
self.assertEqual(
- url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world')
+ url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
+ )
+
+ url = self.client._url("/images/{0}/push", "localhost:5000/image")
+ self.assertEqual(
+ url,
+ '{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
)
def test_url_invalid_resource(self):
@@ -317,6 +317,43 @@ class DockerApiTest(DockerClientTest):
TypeError, self.client.create_host_config, security_opt='wrong'
)
+ def test_stream_helper_decoding(self):
+ status_code, content = fake_api.fake_responses[url_prefix + 'events']()
+ content_str = json.dumps(content)
+ if six.PY3:
+ content_str = content_str.encode('utf-8')
+ body = io.BytesIO(content_str)
+
+ # mock a stream interface
+ raw_resp = urllib3.HTTPResponse(body=body)
+ setattr(raw_resp._fp, 'chunked', True)
+ setattr(raw_resp._fp, 'chunk_left', len(body.getvalue())-1)
+
+ # pass `decode=False` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp))
+ self.assertEqual(result, content_str)
+
+ # pass `decode=True` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp, decode=True))
+ self.assertEqual(result, content)
+
+ # non-chunked response, pass `decode=False` to the helper
+ setattr(raw_resp._fp, 'chunked', False)
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp))
+ self.assertEqual(result, content_str.decode('utf-8'))
+
+ # non-chunked response, pass `decode=True` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp, decode=True))
+ self.assertEqual(result, content)
+
class StreamTest(base.Cleanup, base.BaseTestCase):
def setUp(self):
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 4ea4047..f395133 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -460,4 +460,28 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {}
+ assert cfg == {'scarlet.net': {}}
+
+ def test_load_config_identity_token(self):
+ folder = tempfile.mkdtemp()
+ registry = 'scarlet.net'
+ token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce'
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': auth_entry,
+ 'identitytoken': token
+ }
+ }
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert registry in cfg
+ cfg = cfg[registry]
+ assert 'IdentityToken' in cfg
+ assert cfg['IdentityToken'] == token
diff --git a/tests/unit/build_test.py b/tests/unit/build_test.py
index 414153e..b2705eb 100644
--- a/tests/unit/build_test.py
+++ b/tests/unit/build_test.py
@@ -2,8 +2,9 @@ import gzip
import io
import docker
+from docker import auth
-from .api_test import DockerClientTest
+from .api_test import DockerClientTest, fake_request, url_prefix
class BuildTest(DockerClientTest):
@@ -83,8 +84,25 @@ class BuildTest(DockerClientTest):
}
}
+ expected_params = {'t': None, 'q': False, 'dockerfile': None,
+ 'rm': False, 'nocache': False, 'pull': False,
+ 'forcerm': False,
+ 'remote': 'https://github.com/docker-library/mongo'}
+ expected_headers = {
+ 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+
self.client.build(path='https://github.com/docker-library/mongo')
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'build',
+ stream=True,
+ data=None,
+ headers=expected_headers,
+ params=expected_params,
+ timeout=None
+ )
+
def test_build_container_with_named_dockerfile(self):
self.client.build('.', dockerfile='nameddockerfile')
@@ -103,3 +121,44 @@ class BuildTest(DockerClientTest):
'foo': 'bar'
})
)
+
+ def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
+ self.client._auth_configs = {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
+ }
+
+ headers = {}
+ expected_headers = {
+ 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ self.client._set_auth_headers(headers)
+ self.assertEqual(headers, expected_headers)
+
+ def test_set_auth_headers_with_dict_and_auth_configs(self):
+ self.client._auth_configs = {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
+ }
+
+ headers = {'foo': 'bar'}
+ expected_headers = {
+ 'foo': 'bar',
+ 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+
+ self.client._set_auth_headers(headers)
+ self.assertEqual(headers, expected_headers)
+
+ def test_set_auth_headers_with_dict_and_no_auth_configs(self):
+ headers = {'foo': 'bar'}
+ expected_headers = {
+ 'foo': 'bar'
+ }
+
+ self.client._set_auth_headers(headers)
+ self.assertEqual(headers, expected_headers)
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index b21f1d6..6ceb8cb 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -25,6 +25,14 @@ class ClientTest(base.BaseTestCase):
client = Client.from_env()
self.assertEqual(client.base_url, "https://192.168.59.103:2376")
+ def test_from_env_with_version(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='1')
+ client = Client.from_env(version='2.32')
+ self.assertEqual(client.base_url, "https://192.168.59.103:2376")
+ self.assertEqual(client._version, '2.32')
+
class DisableSocketTest(base.BaseTestCase):
class DummySocket(object):
diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py
index 2a72c17..8871b85 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/container_test.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
import datetime
import json
import signal
@@ -286,6 +288,33 @@ class CreateContainerTest(DockerClientTest):
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
+ @requires_api_version('1.18')
+ def test_create_container_with_host_config_cpu_shares(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpu_shares=512
+ )
+ )
+
+ args = fake_request.call_args
+ self.assertEqual(args[0][1],
+ url_prefix + 'containers/create')
+
+ self.assertEqual(json.loads(args[1]['data']),
+ json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuShares": 512,
+ "NetworkMode": "default"
+ }}'''))
+ self.assertEqual(args[1]['headers'],
+ {'Content-Type': 'application/json'})
+
def test_create_container_with_cpuset(self):
self.client.create_container('busybox', 'ls',
cpuset='0,1')
@@ -306,6 +335,33 @@ class CreateContainerTest(DockerClientTest):
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
+ @requires_api_version('1.18')
+ def test_create_container_with_host_config_cpuset(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpuset_cpus='0,1'
+ )
+ )
+
+ args = fake_request.call_args
+ self.assertEqual(args[0][1],
+ url_prefix + 'containers/create')
+
+ self.assertEqual(json.loads(args[1]['data']),
+ json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuSetCpus": "0,1",
+ "NetworkMode": "default"
+ }}'''))
+ self.assertEqual(args[1]['headers'],
+ {'Content-Type': 'application/json'})
+
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
@@ -695,14 +751,18 @@ class CreateContainerTest(DockerClientTest):
)
def test_create_container_with_mac_address(self):
- mac_address_expected = "02:42:ac:11:00:0a"
+ expected = "02:42:ac:11:00:0a"
- container = self.client.create_container(
- 'busybox', ['sleep', '60'], mac_address=mac_address_expected)
+ self.client.create_container(
+ 'busybox',
+ ['sleep', '60'],
+ mac_address=expected
+ )
- res = self.client.inspect_container(container['Id'])
- self.assertEqual(mac_address_expected,
- res['NetworkSettings']['MacAddress'])
+ args = fake_request.call_args
+ self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ data = json.loads(args[1]['data'])
+ assert data['MacAddress'] == expected
def test_create_container_with_links(self):
link_path = 'path'
@@ -1074,6 +1134,51 @@ class CreateContainerTest(DockerClientTest):
DEFAULT_TIMEOUT_SECONDS
)
+ @requires_api_version('1.24')
+ def test_create_container_with_sysctl(self):
+ self.client.create_container(
+ 'busybox', 'true',
+ host_config=self.client.create_host_config(
+ sysctls={
+ 'net.core.somaxconn': 1024,
+ 'net.ipv4.tcp_syncookies': '0',
+ }
+ )
+ )
+
+ args = fake_request.call_args
+ self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Sysctls'] = {
+ 'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
+ }
+ self.assertEqual(json.loads(args[1]['data']), expected_payload)
+ self.assertEqual(
+ args[1]['headers'], {'Content-Type': 'application/json'}
+ )
+ self.assertEqual(
+ args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_create_container_with_unicode_envvars(self):
+ envvars_dict = {
+ 'foo': u'☃',
+ }
+
+ expected = [
+ u'foo=☃'
+ ]
+
+ self.client.create_container(
+ 'busybox', 'true',
+ environment=envvars_dict,
+ )
+
+ args = fake_request.call_args
+ self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
+
class ContainerTest(DockerClientTest):
def test_list_containers(self):
diff --git a/tests/unit/exec_test.py b/tests/unit/exec_test.py
index 3007799..6ba2a3d 100644
--- a/tests/unit/exec_test.py
+++ b/tests/unit/exec_test.py
@@ -51,8 +51,36 @@ class ExecTest(DockerClientTest):
}
)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ self.assertEqual(
+ args[1]['headers'], {
+ 'Content-Type': 'application/json',
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+ )
+
+ def test_exec_start_detached(self):
+ self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
+
+ args = fake_request.call_args
+ self.assertEqual(
+ args[0][1], url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+ )
+
+ self.assertEqual(
+ json.loads(args[1]['data']), {
+ 'Tty': False,
+ 'Detach': True
+ }
+ )
+
+ self.assertEqual(
+ args[1]['headers'], {
+ 'Content-Type': 'application/json'
+ }
+ )
def test_exec_inspect(self):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index 9952595..1e9d318 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -1,17 +1,3 @@
-# Copyright 2013 dotCloud inc.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
from . import fake_stat
from docker import constants
@@ -183,35 +169,6 @@ def get_fake_inspect_image():
return status_code, response
-def get_fake_port():
- status_code = 200
- response = {
- 'HostConfig': {
- 'Binds': None,
- 'ContainerIDFile': '',
- 'Links': None,
- 'LxcConf': None,
- 'PortBindings': {
- '1111': None,
- '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
- '2222': None
- },
- 'Privileged': False,
- 'PublishAllPorts': False
- },
- 'NetworkSettings': {
- 'Bridge': 'docker0',
- 'PortMapping': None,
- 'Ports': {
- '1111': None,
- '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
- '2222': None},
- 'MacAddress': '02:42:ac:11:00:0a'
- }
- }
- return status_code, response
-
-
def get_fake_insert_image():
status_code = 200
response = {'StatusCode': 0}
@@ -433,7 +390,10 @@ def get_fake_volume():
response = {
'Name': 'perfectcherryblossom',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
+ 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
+ 'Labels': {
+ 'com.example.some-label': 'some-value'
+ }
}
return status_code, response
@@ -506,8 +466,6 @@ fake_responses = {
post_fake_pause_container,
'{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
post_fake_unpause_container,
- '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
- get_fake_port,
'{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
post_fake_restart_container,
'{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
diff --git a/tests/unit/image_test.py b/tests/unit/image_test.py
index 8fd894c..b2b1dd6 100644
--- a/tests/unit/image_test.py
+++ b/tests/unit/image_test.py
@@ -2,6 +2,7 @@ import docker
import pytest
from . import fake_api
+from docker import auth
from .api_test import (
DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
fake_resolve_authconfig
@@ -262,6 +263,31 @@ class ImageTest(DockerClientTest):
timeout=DEFAULT_TIMEOUT_SECONDS
)
+ def test_push_image_with_auth(self):
+ auth_config = {
+ 'username': "test_user",
+ 'password': "test_password",
+ 'serveraddress': "test_server",
+ }
+ encoded_auth = auth.encode_header(auth_config)
+ self.client.push(
+ fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
+ auth_config=auth_config
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/test_image/push',
+ params={
+ 'tag': fake_api.FAKE_TAG_NAME,
+ },
+ data='{}',
+ headers={'Content-Type': 'application/json',
+ 'X-Registry-Auth': encoded_auth},
+ stream=False,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
def test_push_image_stream(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py
index 5bba9db..2521688 100644
--- a/tests/unit/network_test.py
+++ b/tests/unit/network_test.py
@@ -184,4 +184,4 @@ class NetworkTest(DockerClientTest):
self.assertEqual(
json.loads(post.call_args[1]['data']),
- {'container': container_id})
+ {'Container': container_id})
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 128778f..2a2759d 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -20,9 +20,11 @@ from docker.utils import (
create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
exclude_paths, convert_volume_binds, decode_json_header, tar,
split_command, create_ipam_config, create_ipam_pool, parse_devices,
+ update_headers,
)
-from docker.utils.utils import create_endpoint_config
+
from docker.utils.ports import build_port_bindings, split_port
+from docker.utils.utils import create_endpoint_config
from .. import base
from ..helpers import make_tree
@@ -34,6 +36,37 @@ TEST_CERT_DIR = os.path.join(
)
+class DecoratorsTest(base.BaseTestCase):
+ def test_update_headers(self):
+ sample_headers = {
+ 'X-Docker-Locale': 'en-US',
+ }
+
+ def f(self, headers=None):
+ return headers
+
+ client = Client()
+ client._auth_configs = {}
+
+ g = update_headers(f)
+ assert g(client, headers=None) is None
+ assert g(client, headers={}) == {}
+ assert g(client, headers={'Content-type': 'application/json'}) == {
+ 'Content-type': 'application/json',
+ }
+
+ client._auth_configs = {
+ 'HttpHeaders': sample_headers
+ }
+
+ assert g(client, headers=None) == sample_headers
+ assert g(client, headers={}) == sample_headers
+ assert g(client, headers={'Content-type': 'application/json'}) == {
+ 'Content-type': 'application/json',
+ 'X-Docker-Locale': 'en-US',
+ }
+
+
class HostConfigTest(base.BaseTestCase):
def test_create_host_config_no_options(self):
config = create_host_config(version='1.19')
@@ -98,6 +131,16 @@ class HostConfigTest(base.BaseTestCase):
InvalidVersion, lambda: create_host_config(version='1.18.3',
oom_kill_disable=True))
+ def test_create_host_config_with_userns_mode(self):
+ config = create_host_config(version='1.23', userns_mode='host')
+ self.assertEqual(config.get('UsernsMode'), 'host')
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.22',
+ userns_mode='host'))
+ self.assertRaises(
+ ValueError, lambda: create_host_config(version='1.23',
+ userns_mode='host12'))
+
def test_create_host_config_with_oom_score_adj(self):
config = create_host_config(version='1.22', oom_score_adj=100)
self.assertEqual(config.get('OomScoreAdj'), 100)
@@ -108,6 +151,19 @@ class HostConfigTest(base.BaseTestCase):
TypeError, lambda: create_host_config(version='1.22',
oom_score_adj='100'))
+ def test_create_host_config_with_dns_opt(self):
+
+ tested_opts = ['use-vc', 'no-tld-query']
+ config = create_host_config(version='1.21', dns_opt=tested_opts)
+ dns_opts = config.get('DnsOptions')
+
+ self.assertTrue('use-vc' in dns_opts)
+ self.assertTrue('no-tld-query' in dns_opts)
+
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.20',
+ dns_opt=tested_opts))
+
def test_create_endpoint_config_with_aliases(self):
config = create_endpoint_config(version='1.22', aliases=['foo', 'bar'])
assert config == {'Aliases': ['foo', 'bar']}
@@ -115,6 +171,29 @@ class HostConfigTest(base.BaseTestCase):
with pytest.raises(InvalidVersion):
create_endpoint_config(version='1.21', aliases=['foo', 'bar'])
+ def test_create_host_config_with_mem_reservation(self):
+ config = create_host_config(version='1.21', mem_reservation=67108864)
+ self.assertEqual(config.get('MemoryReservation'), 67108864)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.20', mem_reservation=67108864))
+
+ def test_create_host_config_with_kernel_memory(self):
+ config = create_host_config(version='1.21', kernel_memory=67108864)
+ self.assertEqual(config.get('KernelMemory'), 67108864)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.20', kernel_memory=67108864))
+
+ def test_create_host_config_with_pids_limit(self):
+ config = create_host_config(version='1.23', pids_limit=1024)
+ self.assertEqual(config.get('PidsLimit'), 1024)
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', pids_limit=1024)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.22', pids_limit='1024')
+
class UlimitTest(base.BaseTestCase):
def test_create_host_config_dict_ulimit(self):
@@ -404,10 +483,18 @@ class ParseHostTest(base.BaseTestCase):
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
'unix://': 'http+unix://var/run/docker.sock',
+ '12.234.45.127:2375/docker/engine': (
+ 'http://12.234.45.127:2375/docker/engine'
+ ),
'somehost.net:80/service/swarm': (
'http://somehost.net:80/service/swarm'
),
'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
+ '[fd12::82d1]:2375': 'http://[fd12::82d1]:2375',
+ 'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090',
+ '[fd12::82d1]:2375/docker/engine': (
+ 'http://[fd12::82d1]:2375/docker/engine'
+ ),
}
for host in invalid_hosts:
@@ -415,15 +502,15 @@ class ParseHostTest(base.BaseTestCase):
parse_host(host, None)
for host, expected in valid_hosts.items():
- self.assertEqual(parse_host(host, None), expected, msg=host)
+ assert parse_host(host, None) == expected
def test_parse_host_empty_value(self):
unix_socket = 'http+unix://var/run/docker.sock'
- tcp_port = 'http://127.0.0.1:2375'
+ npipe = 'npipe:////./pipe/docker_engine'
for val in [None, '']:
assert parse_host(val, is_win32=False) == unix_socket
- assert parse_host(val, is_win32=True) == tcp_port
+ assert parse_host(val, is_win32=True) == npipe
def test_parse_host_tls(self):
host_value = 'myhost.docker.net:3348'
@@ -602,7 +689,6 @@ class UtilsTest(base.BaseTestCase):
class SplitCommandTest(base.BaseTestCase):
-
def test_split_command_with_unicode(self):
self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
diff --git a/tests/unit/volume_test.py b/tests/unit/volume_test.py
index 5b1823a..136d11a 100644
--- a/tests/unit/volume_test.py
+++ b/tests/unit/volume_test.py
@@ -43,6 +43,22 @@ class VolumeTest(DockerClientTest):
self.assertEqual(args[0][1], url_prefix + 'volumes/create')
self.assertEqual(json.loads(args[1]['data']), {'Name': name})
+ @base.requires_api_version('1.23')
+ def test_create_volume_with_labels(self):
+ name = 'perfectcherryblossom'
+ result = self.client.create_volume(name, labels={
+ 'com.example.some-label': 'some-value'})
+ self.assertEqual(
+ result["Labels"],
+ {'com.example.some-label': 'some-value'}
+ )
+
+ @base.requires_api_version('1.23')
+ def test_create_volume_with_invalid_labels(self):
+ name = 'perfectcherryblossom'
+ with pytest.raises(TypeError):
+ self.client.create_volume(name, labels=1)
+
@base.requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'