author    Felipe Sateler <fsateler@debian.org>  2019-11-22 20:51:27 -0300
committer Felipe Sateler <fsateler@debian.org>  2019-11-22 20:51:27 -0300
commit    3f9a8392bb8dd13baef20d58914f822202947664 (patch)
tree      2b710defcc6f1351e83f4ddba795032a6444a6c8
parent    0e08ec700f3e27bee8da6605cdd93491a14454dd (diff)
parent    001f034ab762eab7724e4c2bc955b7f3f3dc8504 (diff)
Merge tag 'upstream/4.1.0'
Upstream version 4.1.0
-rw-r--r-- MANIFEST.in | 1
-rw-r--r-- PKG-INFO | 125
-rw-r--r-- README.rst | 94
-rw-r--r-- docker.egg-info/PKG-INFO | 125
-rw-r--r-- docker.egg-info/SOURCES.txt | 16
-rw-r--r-- docker.egg-info/requires.txt | 10
-rw-r--r-- docker/api/build.py | 57
-rw-r--r-- docker/api/client.py | 76
-rw-r--r-- docker/api/config.py | 2
-rw-r--r-- docker/api/container.py | 114
-rw-r--r-- docker/api/daemon.py | 26
-rw-r--r-- docker/api/exec_api.py | 14
-rw-r--r-- docker/api/image.py | 57
-rw-r--r-- docker/api/network.py | 5
-rw-r--r-- docker/api/secret.py | 2
-rw-r--r-- docker/api/service.py | 62
-rw-r--r-- docker/api/swarm.py | 93
-rw-r--r-- docker/auth.py | 406
-rw-r--r-- docker/client.py | 4
-rw-r--r-- docker/constants.py | 10
-rw-r--r-- docker/credentials/__init__.py | 4
-rw-r--r-- docker/credentials/constants.py | 4
-rw-r--r-- docker/credentials/errors.py | 25
-rw-r--r-- docker/credentials/store.py | 107
-rw-r--r-- docker/credentials/utils.py | 38
-rw-r--r-- docker/errors.py | 3
-rw-r--r-- docker/models/containers.py | 100
-rw-r--r-- docker/models/images.py | 67
-rw-r--r-- docker/models/networks.py | 3
-rw-r--r-- docker/models/services.py | 95
-rw-r--r-- docker/models/swarm.py | 32
-rw-r--r-- docker/tls.py | 4
-rw-r--r-- docker/transport/__init__.py | 11
-rw-r--r-- docker/transport/basehttpadapter.py | 8
-rw-r--r-- docker/transport/npipeconn.py | 8
-rw-r--r-- docker/transport/npipesocket.py | 4
-rw-r--r-- docker/transport/sshconn.py | 116
-rw-r--r-- docker/transport/ssladapter.py | 8
-rw-r--r-- docker/transport/unixconn.py | 8
-rw-r--r-- docker/types/__init__.py | 5
-rw-r--r-- docker/types/containers.py | 80
-rw-r--r-- docker/types/daemon.py | 14
-rw-r--r-- docker/types/healthcheck.py | 8
-rw-r--r-- docker/types/services.py | 105
-rw-r--r-- docker/utils/ports.py | 8
-rw-r--r-- docker/utils/proxy.py | 73
-rw-r--r-- docker/utils/socket.py | 97
-rw-r--r-- docker/utils/utils.py | 141
-rw-r--r-- docker/version.py | 2
-rw-r--r-- requirements.txt | 13
-rw-r--r-- setup.py | 35
-rw-r--r-- test-requirements.txt | 10
-rw-r--r-- tests/gpg-keys/ownertrust | 3
-rw-r--r-- tests/gpg-keys/secret | bin 0 -> 966 bytes
-rw-r--r-- tests/helpers.py | 19
-rw-r--r-- tests/integration/api_build_test.py | 58
-rw-r--r-- tests/integration/api_client_test.py | 2
-rw-r--r-- tests/integration/api_container_test.py | 272
-rw-r--r-- tests/integration/api_exec_test.py | 181
-rw-r--r-- tests/integration/api_healthcheck_test.py | 10
-rw-r--r-- tests/integration/api_image_test.py | 24
-rw-r--r-- tests/integration/api_network_test.py | 20
-rw-r--r-- tests/integration/api_plugin_test.py | 16
-rw-r--r-- tests/integration/api_service_test.py | 155
-rw-r--r-- tests/integration/api_swarm_test.py | 54
-rw-r--r-- tests/integration/base.py | 80
-rw-r--r-- tests/integration/conftest.py | 10
-rw-r--r-- tests/integration/credentials/__init__.py | 0
-rw-r--r-- tests/integration/credentials/store_test.py | 87
-rw-r--r-- tests/integration/credentials/utils_test.py | 22
-rw-r--r-- tests/integration/errors_test.py | 4
-rw-r--r-- tests/integration/models_containers_test.py | 102
-rw-r--r-- tests/integration/models_images_test.py | 35
-rw-r--r-- tests/integration/models_swarm_test.py | 12
-rw-r--r-- tests/integration/regression_test.py | 12
-rw-r--r-- tests/unit/api_build_test.py | 82
-rw-r--r-- tests/unit/api_test.py | 143
-rw-r--r-- tests/unit/auth_test.py | 442
-rw-r--r-- tests/unit/dockertypes_test.py | 8
-rw-r--r-- tests/unit/errors_test.py | 21
-rw-r--r-- tests/unit/models_containers_test.py | 18
-rw-r--r-- tests/unit/models_images_test.py | 22
-rw-r--r-- tests/unit/models_services_test.py | 8
-rw-r--r-- tests/unit/types_containers_test.py | 6
-rw-r--r-- tests/unit/utils_config_test.py | 24
-rw-r--r-- tests/unit/utils_proxy_test.py | 84
-rw-r--r-- tests/unit/utils_test.py | 41
87 files changed, 3355 insertions, 1257 deletions
diff --git a/MANIFEST.in b/MANIFEST.in
index 41b3fa9..2ba6e02 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -6,3 +6,4 @@ include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
recursive-include tests/integration/testdata *
+recursive-include tests/gpg-keys *
diff --git a/PKG-INFO b/PKG-INFO
index d8c4409..8754725 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,105 +1,92 @@
-Metadata-Version: 1.1
+Metadata-Version: 2.1
Name: docker
-Version: 3.4.1
+Version: 4.1.0
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
-Author: Joffrey F
-Author-email: joffrey@docker.com
+Maintainer: Joffrey F
+Maintainer-email: joffrey@docker.com
License: Apache License 2.0
-Description: Docker SDK for Python
- =====================
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Documentation, https://docker-py.readthedocs.io
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
+Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Description: # Docker SDK for Python
- |Build Status|
+ [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
- A Python library for the Docker Engine API. It lets you do anything the
- ``docker`` command does, but from within Python apps – run containers,
- manage containers, manage Swarms, etc.
+ A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
- Installation
- ------------
+ ## Installation
- The latest stable version `is available on
- PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
- your ``requirements.txt`` file or install with pip:
-
- ::
+ The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker
- If you are intending to connect to a docker host via TLS, add
- ``docker[tls]`` to your requirements instead, or install with pip:
-
- ::
+ If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
pip install docker[tls]
- Usage
- -----
-
- Connect to Docker using the default socket or the configuration in your
- environment:
+ ## Usage
- .. code:: python
+ Connect to Docker using the default socket or the configuration in your environment:
- import docker
- client = docker.from_env()
+ ```python
+ import docker
+ client = docker.from_env()
+ ```
You can run containers:
- .. code:: python
-
- >>> client.containers.run("ubuntu:latest", "echo hello world")
- 'hello world\n'
+ ```python
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+ ```
You can run containers in the background:
- .. code:: python
-
- >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
- <Container '45e6d2de7c54'>
+ ```python
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+ ```
You can manage containers:
- .. code:: python
+ ```python
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
- >>> client.containers.list()
- [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+ >>> container = client.containers.get('45e6d2de7c54')
- >>> container = client.containers.get('45e6d2de7c54')
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
- >>> container.attrs['Config']['Image']
- "bfirsh/reticulate-splines"
+ >>> container.logs()
+ "Reticulating spline 1...\n"
- >>> container.logs()
- "Reticulating spline 1...\n"
-
- >>> container.stop()
+ >>> container.stop()
+ ```
You can stream logs:
- .. code:: python
-
- >>> for line in container.logs(stream=True):
- ... print line.strip()
- Reticulating spline 2...
- Reticulating spline 3...
- ...
+ ```python
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+ ```
You can manage images:
- .. code:: python
-
- >>> client.images.pull('nginx')
- <Image 'nginx'>
-
- >>> client.images.list()
- [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```python
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
- `Read the full documentation <https://docker-py.readthedocs.io>`__ to
- see everything you can do.
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
- :target: https://travis-ci.org/docker/docker-py
+ [Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
@@ -110,9 +97,13 @@ Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Description-Content-Type: text/markdown
+Provides-Extra: tls
+Provides-Extra: ssh
diff --git a/README.rst b/README.rst
deleted file mode 100644
index d0117e6..0000000
--- a/README.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-Docker SDK for Python
-=====================
-
-|Build Status|
-
-A Python library for the Docker Engine API. It lets you do anything the
-``docker`` command does, but from within Python apps – run containers,
-manage containers, manage Swarms, etc.
-
-Installation
-------------
-
-The latest stable version `is available on
-PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
-your ``requirements.txt`` file or install with pip:
-
-::
-
- pip install docker
-
-If you are intending to connect to a docker host via TLS, add
-``docker[tls]`` to your requirements instead, or install with pip:
-
-::
-
- pip install docker[tls]
-
-Usage
------
-
-Connect to Docker using the default socket or the configuration in your
-environment:
-
-.. code:: python
-
- import docker
- client = docker.from_env()
-
-You can run containers:
-
-.. code:: python
-
- >>> client.containers.run("ubuntu:latest", "echo hello world")
- 'hello world\n'
-
-You can run containers in the background:
-
-.. code:: python
-
- >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
- <Container '45e6d2de7c54'>
-
-You can manage containers:
-
-.. code:: python
-
- >>> client.containers.list()
- [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
-
- >>> container = client.containers.get('45e6d2de7c54')
-
- >>> container.attrs['Config']['Image']
- "bfirsh/reticulate-splines"
-
- >>> container.logs()
- "Reticulating spline 1...\n"
-
- >>> container.stop()
-
-You can stream logs:
-
-.. code:: python
-
- >>> for line in container.logs(stream=True):
- ... print line.strip()
- Reticulating spline 2...
- Reticulating spline 3...
- ...
-
-You can manage images:
-
-.. code:: python
-
- >>> client.images.pull('nginx')
- <Image 'nginx'>
-
- >>> client.images.list()
- [<Image 'ubuntu'>, <Image 'nginx'>, ...]
-
-`Read the full documentation <https://docker-py.readthedocs.io>`__ to
-see everything you can do.
-
-.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
- :target: https://travis-ci.org/docker/docker-py
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
index d8c4409..8754725 100644
--- a/docker.egg-info/PKG-INFO
+++ b/docker.egg-info/PKG-INFO
@@ -1,105 +1,92 @@
-Metadata-Version: 1.1
+Metadata-Version: 2.1
Name: docker
-Version: 3.4.1
+Version: 4.1.0
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
-Author: Joffrey F
-Author-email: joffrey@docker.com
+Maintainer: Joffrey F
+Maintainer-email: joffrey@docker.com
License: Apache License 2.0
-Description: Docker SDK for Python
- =====================
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Documentation, https://docker-py.readthedocs.io
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
+Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Description: # Docker SDK for Python
- |Build Status|
+ [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
- A Python library for the Docker Engine API. It lets you do anything the
- ``docker`` command does, but from within Python apps – run containers,
- manage containers, manage Swarms, etc.
+ A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
- Installation
- ------------
+ ## Installation
- The latest stable version `is available on
- PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
- your ``requirements.txt`` file or install with pip:
-
- ::
+ The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker
- If you are intending to connect to a docker host via TLS, add
- ``docker[tls]`` to your requirements instead, or install with pip:
-
- ::
+ If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
pip install docker[tls]
- Usage
- -----
-
- Connect to Docker using the default socket or the configuration in your
- environment:
+ ## Usage
- .. code:: python
+ Connect to Docker using the default socket or the configuration in your environment:
- import docker
- client = docker.from_env()
+ ```python
+ import docker
+ client = docker.from_env()
+ ```
You can run containers:
- .. code:: python
-
- >>> client.containers.run("ubuntu:latest", "echo hello world")
- 'hello world\n'
+ ```python
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+ ```
You can run containers in the background:
- .. code:: python
-
- >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
- <Container '45e6d2de7c54'>
+ ```python
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+ ```
You can manage containers:
- .. code:: python
+ ```python
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
- >>> client.containers.list()
- [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+ >>> container = client.containers.get('45e6d2de7c54')
- >>> container = client.containers.get('45e6d2de7c54')
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
- >>> container.attrs['Config']['Image']
- "bfirsh/reticulate-splines"
+ >>> container.logs()
+ "Reticulating spline 1...\n"
- >>> container.logs()
- "Reticulating spline 1...\n"
-
- >>> container.stop()
+ >>> container.stop()
+ ```
You can stream logs:
- .. code:: python
-
- >>> for line in container.logs(stream=True):
- ... print line.strip()
- Reticulating spline 2...
- Reticulating spline 3...
- ...
+ ```python
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+ ```
You can manage images:
- .. code:: python
-
- >>> client.images.pull('nginx')
- <Image 'nginx'>
-
- >>> client.images.list()
- [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```python
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
- `Read the full documentation <https://docker-py.readthedocs.io>`__ to
- see everything you can do.
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
- :target: https://travis-ci.org/docker/docker-py
+ [Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
@@ -110,9 +97,13 @@ Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Description-Content-Type: text/markdown
+Provides-Extra: tls
+Provides-Extra: ssh
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
index 02d2f19..6daee4c 100644
--- a/docker.egg-info/SOURCES.txt
+++ b/docker.egg-info/SOURCES.txt
@@ -1,7 +1,6 @@
LICENSE
MANIFEST.in
README.md
-README.rst
requirements.txt
setup.cfg
setup.py
@@ -33,6 +32,11 @@ docker/api/secret.py
docker/api/service.py
docker/api/swarm.py
docker/api/volume.py
+docker/credentials/__init__.py
+docker/credentials/constants.py
+docker/credentials/errors.py
+docker/credentials/store.py
+docker/credentials/utils.py
docker/models/__init__.py
docker/models/configs.py
docker/models/containers.py
@@ -46,8 +50,10 @@ docker/models/services.py
docker/models/swarm.py
docker/models/volumes.py
docker/transport/__init__.py
+docker/transport/basehttpadapter.py
docker/transport/npipeconn.py
docker/transport/npipesocket.py
+docker/transport/sshconn.py
docker/transport/ssladapter.py
docker/transport/unixconn.py
docker/types/__init__.py
@@ -65,10 +71,13 @@ docker/utils/decorators.py
docker/utils/fnmatch.py
docker/utils/json_stream.py
docker/utils/ports.py
+docker/utils/proxy.py
docker/utils/socket.py
docker/utils/utils.py
tests/__init__.py
tests/helpers.py
+tests/gpg-keys/ownertrust
+tests/gpg-keys/secret
tests/integration/__init__.py
tests/integration/api_build_test.py
tests/integration/api_client_test.py
@@ -96,6 +105,9 @@ tests/integration/models_services_test.py
tests/integration/models_swarm_test.py
tests/integration/models_volumes_test.py
tests/integration/regression_test.py
+tests/integration/credentials/__init__.py
+tests/integration/credentials/store_test.py
+tests/integration/credentials/utils_test.py
tests/integration/testdata/dummy-plugin/config.json
tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
tests/unit/__init__.py
@@ -120,9 +132,11 @@ tests/unit/models_resources_test.py
tests/unit/models_services_test.py
tests/unit/ssladapter_test.py
tests/unit/swarm_test.py
+tests/unit/types_containers_test.py
tests/unit/utils_build_test.py
tests/unit/utils_config_test.py
tests/unit/utils_json_stream_test.py
+tests/unit/utils_proxy_test.py
tests/unit/utils_test.py
tests/unit/testdata/certs/ca.pem
tests/unit/testdata/certs/cert.pem
diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt
index e0b0763..415d2e6 100644
--- a/docker.egg-info/requires.txt
+++ b/docker.egg-info/requires.txt
@@ -1,7 +1,6 @@
-requests!=2.18.0,>=2.14.2
six>=1.4.0
websocket-client>=0.32.0
-docker-pycreds>=0.3.0
+requests!=2.18.0,>=2.14.2
[:python_version < "3.3"]
ipaddress>=1.0.16
@@ -13,9 +12,12 @@ backports.ssl_match_hostname>=3.5
pypiwin32==219
[:sys_platform == "win32" and python_version >= "3.6"]
-pypiwin32==220
+pypiwin32==223
+
+[ssh]
+paramiko>=2.4.2
[tls]
-pyOpenSSL>=0.14
+pyOpenSSL>=17.5.0
cryptography>=1.3.4
idna>=2.0.0
diff --git a/docker/api/build.py b/docker/api/build.py
index 419255f..365129a 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -19,7 +19,8 @@ class BuildApiMixin(object):
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
- squash=None, extra_hosts=None, platform=None, isolation=None):
+ squash=None, extra_hosts=None, platform=None, isolation=None,
+ use_config_proxy=True):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
@@ -103,6 +104,10 @@ class BuildApiMixin(object):
platform (str): Platform in the format ``os[/arch[/variant]]``
isolation (str): Isolation technology used during build.
Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
Returns:
A generator for the build output.
@@ -116,6 +121,7 @@ class BuildApiMixin(object):
remote = context = None
headers = {}
container_limits = container_limits or {}
+ buildargs = buildargs or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
if gzip and encoding is not None:
@@ -168,6 +174,10 @@ class BuildApiMixin(object):
}
params.update(container_limits)
+ if use_config_proxy:
+ proxy_args = self._proxy_configs.get_environment()
+ for k, v in proxy_args.items():
+ buildargs.setdefault(k, v)
if buildargs:
params.update({'buildargs': json.dumps(buildargs)})
@@ -286,30 +296,21 @@ class BuildApiMixin(object):
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
- if not self._auth_configs:
+ if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
- self._auth_configs = auth.load_config()
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
- auth_data = {}
- if self._auth_configs.get('credsStore'):
- # Using a credentials store, we need to retrieve the
- # credentials for each registry listed in the config.json file
- # Matches CLI behavior: https://github.com/docker/docker/blob/
- # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
- # credentials/native_store.go#L68-L83
- for registry in self._auth_configs.get('auths', {}).keys():
- auth_data[registry] = auth.resolve_authconfig(
- self._auth_configs, registry,
- credstore_env=self.credstore_env,
- )
- else:
- auth_data = self._auth_configs.get('auths', {}).copy()
- # See https://github.com/docker/docker-py/issues/1683
- if auth.INDEX_NAME in auth_data:
- auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
+ auth_data = self._auth_configs.get_all_credentials()
+
+ # See https://github.com/docker/docker-py/issues/1683
+ if (auth.INDEX_URL not in auth_data and
+ auth.INDEX_NAME in auth_data):
+ auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
'Sending auth config ({0})'.format(
@@ -317,9 +318,10 @@ class BuildApiMixin(object):
)
)
- headers['X-Registry-Config'] = auth.encode_header(
- auth_data
- )
+ if auth_data:
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
else:
log.debug('No auth config found')
@@ -331,7 +333,14 @@ def process_dockerfile(dockerfile, path):
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
-
+ if constants.IS_WINDOWS_PLATFORM and path.startswith(
+ constants.WINDOWS_LONGPATH_PREFIX):
+ abs_dockerfile = '{}{}'.format(
+ constants.WINDOWS_LONGPATH_PREFIX,
+ os.path.normpath(
+ abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):]
+ )
+ )
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
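A minimal sketch of the new ``use_config_proxy`` build flag in use, assuming a local daemon and a Dockerfile in the working directory; the tag and proxy URL are placeholders:

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')

# use_config_proxy=True (the new default) injects any proxy settings found
# in ~/.docker/config.json into buildargs; explicit buildargs still win,
# since the hunk above merges them with setdefault().
for chunk in client.build(
    path='.',                                   # assumes ./Dockerfile exists
    tag='example/app:latest',                   # hypothetical tag
    buildargs={'HTTP_PROXY': 'http://proxy.example:3128'},
    use_config_proxy=True,
    decode=True,
):
    print(chunk)
```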
diff --git a/docker/api/client.py b/docker/api/client.py
index 91da1c8..35dc84e 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -22,20 +22,26 @@ from .volume import VolumeApiMixin
from .. import auth
from ..constants import (
DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
- DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
- MINIMUM_DOCKER_API_VERSION
+ DEFAULT_DOCKER_API_VERSION, MINIMUM_DOCKER_API_VERSION,
+ STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS_SSH, DEFAULT_NUM_POOLS
)
from ..errors import (
DockerException, InvalidVersion, TLSParameterError,
create_api_error_from_http_exception
)
from ..tls import TLSConfig
-from ..transport import SSLAdapter, UnixAdapter
+from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
from ..utils import utils, check_resource, update_headers, config
-from ..utils.socket import frames_iter, socket_raw_iter
+from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor
from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
try:
- from ..transport import NpipeAdapter
+ from ..transport import NpipeHTTPAdapter
+except ImportError:
+ pass
+
+try:
+ from ..transport import SSHHTTPAdapter
except ImportError:
pass
@@ -76,7 +82,7 @@ class APIClient(
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -95,7 +101,7 @@ class APIClient(
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS,
+ user_agent=DEFAULT_USER_AGENT, num_pools=None,
credstore_env=None):
super(APIClient, self).__init__()
@@ -109,16 +115,29 @@ class APIClient(
self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config()
+
+ proxy_config = self._general_configs.get('proxies', {})
+ try:
+ proxies = proxy_config[base_url]
+ except KeyError:
+ proxies = proxy_config.get('default', {})
+
+ self._proxy_configs = ProxyConfig.from_dict(proxies)
+
self._auth_configs = auth.load_config(
- config_dict=self._general_configs
+ config_dict=self._general_configs, credstore_env=credstore_env,
)
self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
+ # SSH has a different default for num_pools to all other adapters
+ num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
+ base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
+
if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(
+ self._custom_adapter = UnixHTTPAdapter(
base_url, timeout, pool_connections=num_pools
)
self.mount('http+docker://', self._custom_adapter)
@@ -132,7 +151,7 @@ class APIClient(
'The npipe:// protocol is only supported on Windows'
)
try:
- self._custom_adapter = NpipeAdapter(
+ self._custom_adapter = NpipeHTTPAdapter(
base_url, timeout, pool_connections=num_pools
)
except NameError:
@@ -141,12 +160,25 @@ class APIClient(
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
+ elif base_url.startswith('ssh://'):
+ try:
+ self._custom_adapter = SSHHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ except NameError:
+ raise DockerException(
+ 'Install paramiko package to enable ssh:// support'
+ )
+ self.mount('http+docker://ssh', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://ssh'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
- self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self._custom_adapter = SSLHTTPAdapter(
+ pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
@@ -279,6 +311,8 @@ class APIClient(
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
+ elif self.base_url.startswith('http+docker://ssh'):
+ sock = response.raw._fp.fp.channel
elif six.PY3:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
@@ -362,19 +396,23 @@ class APIClient(
for out in response.iter_content(chunk_size, decode):
yield out
- def _read_from_socket(self, response, stream, tty=False):
+ def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
- gen = None
- if tty is False:
- gen = frames_iter(socket)
+ gen = frames_iter(socket, tty)
+
+ if demux:
+ # The generator will output tuples (stdout, stderr)
+ gen = (demux_adaptor(*frame) for frame in gen)
else:
- gen = socket_raw_iter(socket)
+ # The generator will output strings
+ gen = (data for (_, data) in gen)
if stream:
return gen
else:
- return six.binary_type().join(gen)
+ # Wait for all the frames, concatenate them, and return the result
+ return consume_socket_output(gen, demux=demux)
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
@@ -457,4 +495,6 @@ class APIClient(
Returns:
None
"""
- self._auth_configs = auth.load_config(dockercfg_path)
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
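The adapter changes above add an optional ``ssh://`` transport backed by paramiko (installable via the new ``docker[ssh]`` extra). A sketch, assuming key-based SSH access to a remote daemon; the host is a placeholder:

```python
import docker

# Without paramiko installed, the NameError fallback in the hunk above
# surfaces as DockerException('Install paramiko package to enable
# ssh:// support').
client = docker.APIClient(
    base_url='ssh://user@docker-host.example:22',  # hypothetical host
    version='auto',
)
print(client.version()['ApiVersion'])
```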
diff --git a/docker/api/config.py b/docker/api/config.py
index 767bef2..93e5168 100644
--- a/docker/api/config.py
+++ b/docker/api/config.py
@@ -42,7 +42,7 @@ class ConfigApiMixin(object):
Retrieve config metadata
Args:
- id (string): Full ID of the config to remove
+ id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
diff --git a/docker/api/container.py b/docker/api/container.py
index d4f75f5..45bd352 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,19 +1,21 @@
-import six
from datetime import datetime
+import six
+
from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
-from ..types import (
- CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
- NetworkingConfig
-)
+from ..types import CancellableStream
+from ..types import ContainerConfig
+from ..types import EndpointConfig
+from ..types import HostConfig
+from ..types import NetworkingConfig
class ContainerApiMixin(object):
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
- stream=False, logs=False):
+ stream=False, logs=False, demux=False):
"""
Attach to a container.
@@ -28,11 +30,15 @@ class ContainerApiMixin(object):
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
+ demux (bool): Keep stdout and stderr separate.
Returns:
- By default, the container's output as a single string.
+ By default, the container's output as a single string (two if
+ ``demux=True``: one for stdout and one for stderr).
- If ``stream=True``, an iterator of output strings.
+ If ``stream=True``, an iterator of output strings. If
+ ``demux=True``, two iterators are returned: one for stdout and one
+ for stderr.
Raises:
:py:class:`docker.errors.APIError`
@@ -54,8 +60,7 @@ class ContainerApiMixin(object):
response = self._post(u, headers=headers, params=params, stream=True)
output = self._read_from_socket(
- response, stream, self._check_is_tty(container)
- )
+ response, stream, self._check_is_tty(container), demux=demux)
if stream:
return CancellableStream(output, response)
@@ -169,7 +174,8 @@ class ContainerApiMixin(object):
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
@@ -218,7 +224,8 @@ class ContainerApiMixin(object):
working_dir=None, domainname=None, host_config=None,
mac_address=None, labels=None, stop_signal=None,
networking_config=None, healthcheck=None,
- stop_timeout=None, runtime=None):
+ stop_timeout=None, runtime=None,
+ use_config_proxy=True):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
@@ -387,6 +394,10 @@ class ContainerApiMixin(object):
runtime (str): Runtime to use with this container.
healthcheck (dict): Specify a test to perform to check that the
container is healthy.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being created.
Returns:
A dictionary with an image 'Id' key and a 'Warnings' key.
@@ -400,6 +411,14 @@ class ContainerApiMixin(object):
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
+ if use_config_proxy:
+ environment = self._proxy_configs.inject_proxy_environment(
+ environment
+ ) or None
+
config = self.create_container_config(
image, command, hostname, user, detach, stdin_open, tty,
ports, environment, volumes,
@@ -465,30 +484,26 @@ class ContainerApiMixin(object):
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
dns_search (:py:class:`list`): DNS search domains.
- extra_hosts (dict): Addtional hostnames to resolve inside the
+ extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
init (bool): Run an init inside the container that forwards
signals and reaps processes
- init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
- isolation (str): Isolation technology to use. Default: `None`.
- links (dict or list of tuples): Either a dictionary mapping name
- to alias or as a list of ``(name, alias)`` tuples.
- log_config (dict): Logging configuration, as a dictionary with
- keys:
-
- - ``type`` The logging driver name.
- - ``config`` A dictionary of configuration for the logging
- driver.
-
+ isolation (str): Isolation technology to use. Default: ``None``.
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration
lxc_conf (dict): LXC config.
mem_limit (float or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
+ mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -500,7 +515,7 @@ class ContainerApiMixin(object):
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
- on the bridge network.
+ the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
@@ -543,10 +558,12 @@ class ContainerApiMixin(object):
}
ulimits (:py:class:`list`): Ulimits to set inside the container,
- as a list of dicts.
+ as a list of :py:class:`docker.types.Ulimit` instances.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
runtime (str): Runtime to use with this container.
@@ -609,9 +626,10 @@ class ContainerApiMixin(object):
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
- links (:py:class:`list`): A list of links for this endpoint.
- Containers declared in this list will be linked to this
- container. Defaults to ``None``.
+ links (dict): Mapping of links for this endpoint using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to this
+ container using the provided alias. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
@@ -626,7 +644,7 @@ class ContainerApiMixin(object):
>>> endpoint_config = client.create_endpoint_config(
aliases=['web', 'app'],
- links=['app_db'],
+ links={'app_db': 'db', 'another': None},
ipv4_address='132.65.0.123'
)
@@ -695,6 +713,18 @@ class ContainerApiMixin(object):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+
+ Example:
+
+ >>> c = docker.APIClient()
+ >>> f = open('./sh_bin.tar', 'wb')
+ >>> bits, stat = c.get_archive(container, '/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
"""
params = {
'path': path
@@ -763,16 +793,16 @@ class ContainerApiMixin(object):
Args:
container (str): The container to get logs from
- stdout (bool): Get ``STDOUT``
- stderr (bool): Get ``STDERR``
- stream (bool): Stream the response
- timestamps (bool): Show timestamps
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
- follow (bool): Follow log output
+ follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
@@ -888,9 +918,10 @@ class ContainerApiMixin(object):
if '/' in private_port:
return port_settings.get(private_port)
- h_ports = port_settings.get(private_port + '/tcp')
- if h_ports is None:
- h_ports = port_settings.get(private_port + '/udp')
+ for protocol in ['tcp', 'udp', 'sctp']:
+ h_ports = port_settings.get(private_port + '/' + protocol)
+ if h_ports:
+ break
return h_ports
@@ -1072,7 +1103,8 @@ class ContainerApiMixin(object):
Args:
container (str): The container to stream statistics from
decode (bool): If set to true, stream will be decoded into dicts
- on the fly. False by default.
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
@@ -1086,6 +1118,10 @@ class ContainerApiMixin(object):
return self._stream_helper(self._get(url, stream=True),
decode=decode)
else:
+ if decode:
+ raise errors.InvalidArgument(
+ "decode is only available in conjuction with stream=True"
+ )
return self._result(self._get(url, params={'stream': False}),
json=True)
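Among the changes above, ``create_container`` now accepts a plain dict for ``environment`` and, with ``use_config_proxy=True``, merges in proxy variables from the client configuration. A sketch with a hypothetical variable:

```python
import docker

client = docker.APIClient()

# A dict environment is converted via format_environment() and, with
# use_config_proxy=True, extended with HTTP_PROXY/HTTPS_PROXY/... values
# from ~/.docker/config.json before the container is created.
container = client.create_container(
    'busybox', 'env',
    environment={'APP_ENV': 'staging'},  # hypothetical variable
    use_config_proxy=True,
)
client.start(container)
print(client.logs(container))
```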
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 76a94cf..f715a13 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -42,8 +42,8 @@ class DaemonApiMixin(object):
Example:
- >>> for event in client.events()
- ... print event
+ >>> for event in client.events(decode=True)
+ ... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
@@ -54,7 +54,7 @@ class DaemonApiMixin(object):
>>> events = client.events()
>>> for event in events:
- ... print event
+ ... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
@@ -124,13 +124,15 @@ class DaemonApiMixin(object):
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
- self._auth_configs = auth.load_config(dockercfg_path)
- elif not self._auth_configs:
- self._auth_configs = auth.load_config()
-
- authcfg = auth.resolve_authconfig(
- self._auth_configs, registry, credstore_env=self.credstore_env,
- )
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+ elif not self._auth_configs or self._auth_configs.is_empty:
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
@@ -146,9 +148,7 @@ class DaemonApiMixin(object):
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- if 'auths' not in self._auth_configs:
- self._auth_configs['auths'] = {}
- self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
+ self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
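The hunks above thread ``credstore_env`` through every ``auth.load_config()`` call, so credential helpers run with the extra environment the client was constructed with. A sketch with placeholder credentials:

```python
import docker

# credstore_env is stored on the client and forwarded to auth.load_config()
# whenever the auth configuration is (re)loaded, as in the hunks above.
client = docker.APIClient(credstore_env={'LANG': 'C.UTF-8'})
client.login(
    username='ci-bot',                       # hypothetical account
    password='s3cret',
    registry='registry.example.com',
)
```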
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 986d87f..4c49ac3 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -118,7 +118,7 @@ class ExecApiMixin(object):
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
- socket=False):
+ socket=False, demux=False):
"""
Start a previously set up exec instance.
@@ -130,11 +130,15 @@ class ExecApiMixin(object):
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations.
+ demux (bool): Return stdout and stderr separately
Returns:
- (generator or str): If ``stream=True``, a generator yielding
- response chunks. If ``socket=True``, a socket object for the
- connection. A string containing response data otherwise.
+
+ (generator or str or tuple): If ``stream=True``, a generator
+ yielding response chunks. If ``socket=True``, a socket object for
+ the connection. A string containing response data otherwise. If
+ ``demux=True``, a tuple with two elements of type byte: stdout and
+ stderr.
Raises:
:py:class:`docker.errors.APIError`
@@ -162,4 +166,4 @@ class ExecApiMixin(object):
return self._result(res)
if socket:
return self._get_raw_response_socket(res)
- return self._read_from_socket(res, stream, tty)
+ return self._read_from_socket(res, stream, tty=tty, demux=demux)
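The new ``demux`` flag splits the multiplexed stream into stdout and stderr. A sketch of ``exec_start(demux=True)``, assuming a running container named ``my-container``:

```python
import docker

client = docker.APIClient()

exec_id = client.exec_create(
    'my-container',                              # hypothetical container
    'sh -c "echo to-stdout; echo to-stderr >&2"',
)
# With stream=False and demux=True, exec_start returns a (stdout, stderr)
# tuple of byte strings instead of a single interleaved byte string.
stdout, stderr = client.exec_start(exec_id, demux=True)
print(stdout, stderr)
```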
diff --git a/docker/api/image.py b/docker/api/image.py
index 5f05d88..11c8cf7 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -32,7 +32,7 @@ class ImageApiMixin(object):
Example:
>>> image = cli.get_image("busybox:latest")
- >>> f = open('/tmp/busybox-latest.tar', 'w')
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
@@ -70,7 +70,8 @@ class ImageApiMixin(object):
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- - ``label`` (str): format either ``key`` or ``key=value``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
@@ -247,12 +248,15 @@ class ImageApiMixin(object):
@utils.minimum_version('1.30')
@utils.check_resource('image')
- def inspect_distribution(self, image):
+ def inspect_distribution(self, image, auth_config=None):
"""
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
@@ -261,9 +265,21 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ registry, _ = auth.resolve_repository_name(image)
+
+ headers = {}
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ url = self._url("/distribution/{0}/json", image)
return self._result(
- self._get(self._url("/distribution/{0}/json", image)), True
+ self._get(url, headers=headers), True
)
def load_image(self, data, quiet=None):
@@ -334,11 +350,11 @@ class ImageApiMixin(object):
Args:
repository (str): The repository to pull
tag (str): The tag to pull
- stream (bool): Stream the output as a generator
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ stream (bool): Stream the output as a generator. Make sure to
+ consume the generator, otherwise pull might get cancelled.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
@@ -352,8 +368,8 @@ class ImageApiMixin(object):
Example:
- >>> for line in cli.pull('busybox', stream=True):
- ... print(json.dumps(json.loads(line), indent=4))
+ >>> for line in cli.pull('busybox', stream=True, decode=True):
+ ... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
@@ -413,10 +429,9 @@ class ImageApiMixin(object):
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
@@ -428,12 +443,12 @@ class ImageApiMixin(object):
If the server returns an error.
Example:
- >>> for line in cli.push('yourname/app', stream=True):
- ... print line
- {"status":"Pushing repository yourname/app (1 tags)"}
- {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
- {"status":"Image already pushed, skipping","progressDetail":{},
- "id":"511136ea3c5a"}
+ >>> for line in cli.push('yourname/app', stream=True, decode=True):
+ ... print(line)
+ {'status': 'Pushing repository yourname/app (1 tags)'}
+ {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
+ {'status': 'Image already pushed, skipping', 'progressDetail':{},
+ 'id': '511136ea3c5a'}
...
"""
diff --git a/docker/api/network.py b/docker/api/network.py
index 57ed8d3..750b91b 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -7,7 +7,7 @@ from .. import utils
class NetworkApiMixin(object):
def networks(self, names=None, ids=None, filters=None):
"""
- List networks. Similar to the ``docker networks ls`` command.
+ List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
@@ -15,7 +15,8 @@ class NetworkApiMixin(object):
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
+ such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
diff --git a/docker/api/secret.py b/docker/api/secret.py
index fa4c2ab..e57952b 100644
--- a/docker/api/secret.py
+++ b/docker/api/secret.py
@@ -53,7 +53,7 @@ class SecretApiMixin(object):
Retrieve secret metadata
Args:
- id (string): Full ID of the secret to remove
+ id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
diff --git a/docker/api/service.py b/docker/api/service.py
index 03b0ca6..e9027bf 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -2,7 +2,8 @@ from .. import auth, errors, utils
from ..types import ServiceMode
-def _check_api_features(version, task_template, update_config, endpoint_spec):
+def _check_api_features(version, task_template, update_config, endpoint_spec,
+ rollback_config):
def raise_version_error(param, min_version):
raise errors.InvalidVersion(
@@ -18,10 +19,24 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if 'Monitor' in update_config:
raise_version_error('UpdateConfig.monitor', '1.25')
+ if utils.version_lt(version, '1.28'):
+ if update_config.get('FailureAction') == 'rollback':
+ raise_version_error(
+ 'UpdateConfig.failure_action rollback', '1.28'
+ )
+
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('UpdateConfig.order', '1.29')
+ if rollback_config is not None:
+ if utils.version_lt(version, '1.28'):
+ raise_version_error('rollback_config', '1.28')
+
+ if utils.version_lt(version, '1.29'):
+ if 'Order' in update_config:
+ raise_version_error('RollbackConfig.order', '1.29')
+
if endpoint_spec is not None:
if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
@@ -73,6 +88,10 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35')
+ if utils.version_lt(version, '1.38'):
+ if container_spec.get('Init') is not None:
+ raise_version_error('ContainerSpec.init', '1.38')
+
if task_template.get('Resources'):
if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'):
@@ -99,7 +118,7 @@ class ServiceApiMixin(object):
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
- endpoint_spec=None
+ endpoint_spec=None, rollback_config=None
):
"""
Create a service.
@@ -114,8 +133,11 @@ class ServiceApiMixin(object):
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
- networks (:py:class:`list`): List of network names or IDs to attach
- the service to. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
@@ -129,7 +151,8 @@ class ServiceApiMixin(object):
"""
_check_api_features(
- self._version, task_template, update_config, endpoint_spec
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
)
url = self._url('/services/create')
@@ -160,6 +183,9 @@ class ServiceApiMixin(object):
if update_config is not None:
data['UpdateConfig'] = update_config
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+
return self._result(
self._post_json(url, data=data, headers=headers), True
)
@@ -176,7 +202,8 @@ class ServiceApiMixin(object):
into the service inspect output.
Returns:
- ``True`` if successful.
+ (dict): A dictionary of the server-side representation of the
+ service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
@@ -336,7 +363,8 @@ class ServiceApiMixin(object):
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
- endpoint_spec=None, fetch_current_spec=False):
+ endpoint_spec=None, fetch_current_spec=False,
+ rollback_config=None):
"""
Update a service.
@@ -354,15 +382,18 @@ class ServiceApiMixin(object):
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
- networks (:py:class:`list`): List of network names or IDs to attach
- the service to. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False``
Returns:
- ``True`` if successful.
+ A dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
@@ -370,7 +401,8 @@ class ServiceApiMixin(object):
"""
_check_api_features(
- self._version, task_template, update_config, endpoint_spec
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
)
if fetch_current_spec:
@@ -416,6 +448,11 @@ class ServiceApiMixin(object):
else:
data['UpdateConfig'] = current.get('UpdateConfig')
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+ else:
+ data['RollbackConfig'] = current.get('RollbackConfig')
+
if networks is not None:
converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'):
@@ -440,5 +477,4 @@ class ServiceApiMixin(object):
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
- self._raise_for_status(resp)
- return True
+ return self._result(resp, json=True)
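``create_service`` and ``update_service`` now accept a ``rollback_config``, validated against API 1.28+ by ``_check_api_features``. A sketch, assuming ``RollbackConfig`` is exported by ``docker.types`` (one of the modules changed in this release):

```python
import docker
from docker.types import (
    ContainerSpec, RollbackConfig, TaskTemplate, UpdateConfig
)

client = docker.APIClient(version='auto')

task = TaskTemplate(ContainerSpec(image='nginx:alpine'))
# failure_action='rollback' needs API >= 1.28; order='start-first' needs
# >= 1.29, matching the version checks added above.
service = client.create_service(
    task,
    name='web',                                 # hypothetical service name
    update_config=UpdateConfig(parallelism=1, failure_action='rollback'),
    rollback_config=RollbackConfig(parallelism=1, order='start-first'),
)
print(service['ID'])
```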
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 04595da..897f08e 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,5 +1,6 @@
import logging
from six.moves import http_client
+from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
from .. import utils
@@ -82,7 +83,9 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, swarm_spec=None):
+ force_new_cluster=False, swarm_spec=None,
+ default_addr_pool=None, subnet_size=None,
+ data_path_addr=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
@@ -107,9 +110,17 @@ class SwarmApiMixin(object):
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
+ default_addr_pool (list of strings): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
- ``True`` if successful.
+ (str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
@@ -119,15 +130,44 @@ class SwarmApiMixin(object):
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
+
+ if default_addr_pool is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Address pool is only available for API version >= 1.39'
+ )
+ # subnet_size becomes 0 if not set with default_addr_pool
+ if subnet_size is None:
+ subnet_size = DEFAULT_SWARM_SUBNET_SIZE
+
+ if subnet_size is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Subnet size is only available for API version >= 1.39'
+ )
+ # subnet_size is ignored if set without default_addr_pool
+ if default_addr_pool is None:
+ default_addr_pool = DEFAULT_SWARM_ADDR_POOL
+
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
+ 'DefaultAddrPool': default_addr_pool,
+ 'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
response = self._post_json(url, data=data)
- self._raise_for_status(response)
- return True
+ return self._result(response, json=True)
@utils.minimum_version('1.24')
def inspect_swarm(self):
@@ -165,7 +205,7 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
- advertise_addr=None):
+ advertise_addr=None, data_path_addr=None):
"""
Make this Engine join a swarm that has already been created.
@@ -176,7 +216,7 @@ class SwarmApiMixin(object):
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
- Endpoint (VTEP). Default: ``None``
+                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
@@ -184,6 +224,8 @@ class SwarmApiMixin(object):
the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None``
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
``True`` if the request went through.
@@ -193,11 +235,20 @@ class SwarmApiMixin(object):
If the server returns an error.
"""
data = {
- "RemoteAddrs": remote_addrs,
- "ListenAddr": listen_addr,
- "JoinToken": join_token,
- "AdvertiseAddr": advertise_addr,
+ 'RemoteAddrs': remote_addrs,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token,
+ 'AdvertiseAddr': advertise_addr,
}
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
@@ -355,8 +406,10 @@ class SwarmApiMixin(object):
return True
@utils.minimum_version('1.24')
- def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
- rotate_manager_token=False):
+ def update_swarm(self, version, swarm_spec=None,
+ rotate_worker_token=False,
+ rotate_manager_token=False,
+ rotate_manager_unlock_key=False):
"""
Update the Swarm's configuration
@@ -370,6 +423,8 @@ class SwarmApiMixin(object):
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
Returns:
``True`` if the request went through.
@@ -378,12 +433,20 @@ class SwarmApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
-
url = self._url('/swarm/update')
- response = self._post_json(url, data=swarm_spec, params={
+ params = {
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
- })
+ }
+ if rotate_manager_unlock_key:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Rotate manager unlock key '
+ 'is only available for API version >= 1.25'
+ )
+ params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
+
+ response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response)
return True
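
Taken together, the swarm-side changes look like this from a caller's
perspective; a minimal sketch assuming a reachable engine on the default
socket and API version >= 1.39:

    import docker

    client = docker.APIClient(base_url='unix:///var/run/docker.sock')

    # init_swarm() now returns the node ID instead of True. The address
    # pool arguments require API >= 1.39; data_path_addr requires >= 1.30.
    node_id = client.init_swarm(
        advertise_addr='eth0',
        default_addr_pool=['10.20.0.0/16'],
        subnet_size=24,
        data_path_addr='eth0',
    )
    print('first manager:', node_id)

    # Rotating the manager unlock key requires API >= 1.25.
    swarm = client.inspect_swarm()
    client.update_swarm(
        version=swarm['Version']['Index'],
        rotate_manager_unlock_key=True,
    )
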
diff --git a/docker/auth.py b/docker/auth.py
index 9635f93..6a07ea2 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -2,9 +2,9 @@ import base64
import json
import logging
-import dockerpycreds
import six
+from . import credentials
from . import errors
from .utils import config
@@ -39,11 +39,11 @@ def resolve_index_name(index_name):
def get_config_header(client, registry):
log.debug('Looking for auth config')
- if not client._auth_configs:
+ if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
- client._auth_configs = load_config()
+ client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
@@ -70,81 +70,258 @@ def split_repo_name(repo_name):
def get_credential_store(authconfig, registry):
- if not registry or registry == INDEX_NAME:
- registry = 'https://index.docker.io/v1/'
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig)
+ return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+ def __init__(self, dct, credstore_env=None):
+ if 'auths' not in dct:
+ dct['auths'] = {}
+ self.update(dct)
+ self._credstore_env = credstore_env
+ self._stores = {}
+
+ @classmethod
+ def parse_auth(cls, entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ conf = {}
+ for registry, entry in six.iteritems(entries):
+ if not isinstance(entry, dict):
+ log.debug(
+ 'Config entry for key {0} is not auth config'.format(
+ registry
+ )
+ )
+ # We sometimes fall back to parsing the whole config as if it
+ # was the auth config by itself, for legacy purposes. In that
+ # case, we fail silently and return an empty conf if any of the
+ # keys is not formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {0}'.format(
+ registry
+ )
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug(
+ 'Found an IdentityToken entry for registry {0}'.format(
+ registry
+ )
+ )
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug(
+ 'Auth data for {0} is absent. Client might be using a '
+ 'credentials store instead.'.format(registry)
+ )
+ conf[registry] = {}
+ continue
- return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
- 'credsStore'
- )
+ username, password = decode_auth(entry['auth'])
+ log.debug(
+ 'Found entry (registry={0}, username={1})'
+ .format(repr(registry), repr(username))
+ )
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+ @classmethod
+ def load_config(cls, config_path, config_dict, credstore_env=None):
+ """
+        Loads authentication data from a Docker configuration file in the
+        given root directory, or from config_path if it is passed.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment
+ variable > ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return cls({}, credstore_env)
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (IOError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return cls(_load_legacy_config(config_file), credstore_env)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': cls.parse_auth(
+ config_dict.pop('auths'), raise_on_error=True
+ )
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return cls(res, credstore_env)
-def resolve_authconfig(authconfig, registry=None, credstore_env=None):
- """
- Returns the authentication data from the given auth configuration for a
- specific registry. As with the Docker client, legacy entries in the config
- with full URLs are stripped down to hostnames before checking for a match.
- Returns None if no match was found.
- """
+ log.debug(
+            "Couldn't find auth-related section; attempting to interpret "
+ "as auth-only file"
+ )
+ return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
- if 'credHelpers' in authconfig or 'credsStore' in authconfig:
- store_name = get_credential_store(authconfig, registry)
- if store_name is not None:
- log.debug(
- 'Using credentials store "{0}"'.format(store_name)
- )
- cfg = _resolve_authconfig_credstore(
- authconfig, registry, store_name, env=credstore_env
- )
- if cfg is not None:
- return cfg
- log.debug('No entry in credstore - fetching from auth dict')
+ @property
+ def auths(self):
+ return self.get('auths', {})
- # Default to the public index server
- registry = resolve_index_name(registry) if registry else INDEX_NAME
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ @property
+ def creds_store(self):
+ return self.get('credsStore', None)
- authdict = authconfig.get('auths', {})
- if registry in authdict:
- log.debug("Found {0}".format(repr(registry)))
- return authdict[registry]
+ @property
+ def cred_helpers(self):
+ return self.get('credHelpers', {})
- for key, conf in six.iteritems(authdict):
- if resolve_index_name(key) == registry:
- log.debug("Found {0}".format(repr(key)))
- return conf
+ @property
+ def is_empty(self):
+ return (
+ not self.auths and not self.creds_store and not self.cred_helpers
+ )
- log.debug("No entry found")
- return None
+ def resolve_authconfig(self, registry=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the
+ config with full URLs are stripped down to hostnames before checking
+ for a match. Returns None if no match was found.
+ """
+
+ if self.creds_store or self.cred_helpers:
+ store_name = self.get_credential_store(registry)
+ if store_name is not None:
+ log.debug(
+ 'Using credentials store "{0}"'.format(store_name)
+ )
+ cfg = self._resolve_authconfig_credstore(registry, store_name)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug("Looking for auth entry for {0}".format(repr(registry)))
-def _resolve_authconfig_credstore(authconfig, registry, credstore_name,
- env=None):
- if not registry or registry == INDEX_NAME:
- # The ecosystem is a little schizophrenic with index.docker.io VS
- # docker.io - in that case, it seems the full URL is necessary.
- registry = INDEX_URL
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
- store = dockerpycreds.Store(credstore_name, environment=env)
- try:
- data = store.get(registry)
- res = {
- 'ServerAddress': registry,
- }
- if data['Username'] == TOKEN_USERNAME:
- res['IdentityToken'] = data['Secret']
- else:
- res.update({
- 'Username': data['Username'],
- 'Password': data['Secret'],
- })
- return res
- except dockerpycreds.CredentialsNotFound as e:
- log.debug('No entry found')
+ if registry in self.auths:
+ log.debug("Found {0}".format(repr(registry)))
+ return self.auths[registry]
+
+ for key, conf in six.iteritems(self.auths):
+ if resolve_index_name(key) == registry:
+ log.debug("Found {0}".format(repr(key)))
+ return conf
+
+ log.debug("No entry found")
return None
- except dockerpycreds.StoreError as e:
- raise errors.DockerException(
- 'Credentials store error: {0}'.format(repr(e))
- )
+
+ def _resolve_authconfig_credstore(self, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+            # The ecosystem is a little inconsistent between index.docker.io
+            # and docker.io - in that case, it seems the full URL is necessary.
+ registry = INDEX_URL
+ log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ store = self._get_store_instance(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except credentials.CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except credentials.StoreError as e:
+ raise errors.DockerException(
+ 'Credentials store error: {0}'.format(repr(e))
+ )
+
+ def _get_store_instance(self, name):
+ if name not in self._stores:
+ self._stores[name] = credentials.Store(
+ name, environment=self._credstore_env
+ )
+ return self._stores[name]
+
+ def get_credential_store(self, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = INDEX_URL
+
+ return self.cred_helpers.get(registry) or self.creds_store
+
+ def get_all_credentials(self):
+ auth_data = self.auths.copy()
+ if self.creds_store:
+ # Retrieve all credentials from the default store
+ store = self._get_store_instance(self.creds_store)
+ for k in store.list().keys():
+ auth_data[k] = self._resolve_authconfig_credstore(
+ k, self.creds_store
+ )
+ auth_data[convert_to_hostname(k)] = auth_data[k]
+
+ # credHelpers entries take priority over all others
+ for reg, store_name in self.cred_helpers.items():
+ auth_data[reg] = self._resolve_authconfig_credstore(
+ reg, store_name
+ )
+ auth_data[convert_to_hostname(reg)] = auth_data[reg]
+
+ return auth_data
+
+ def add_auth(self, reg, data):
+ self['auths'][reg] = data
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig, credstore_env)
+ return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url):
@@ -177,100 +354,11 @@ def parse_auth(entries, raise_on_error=False):
Authentication registry.
"""
- conf = {}
- for registry, entry in six.iteritems(entries):
- if not isinstance(entry, dict):
- log.debug(
- 'Config entry for key {0} is not auth config'.format(registry)
- )
- # We sometimes fall back to parsing the whole config as if it was
- # the auth config by itself, for legacy purposes. In that case, we
- # fail silently and return an empty conf if any of the keys is not
- # formatted properly.
- if raise_on_error:
- raise errors.InvalidConfigFile(
- 'Invalid configuration for registry {0}'.format(registry)
- )
- return {}
- if 'identitytoken' in entry:
- log.debug('Found an IdentityToken entry for registry {0}'.format(
- registry
- ))
- conf[registry] = {
- 'IdentityToken': entry['identitytoken']
- }
- continue # Other values are irrelevant if we have a token, skip.
-
- if 'auth' not in entry:
- # Starting with engine v1.11 (API 1.23), an empty dictionary is
- # a valid value in the auths config.
- # https://github.com/docker/compose/issues/3265
- log.debug(
- 'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'.format(registry)
- )
- conf[registry] = {}
- continue
-
- username, password = decode_auth(entry['auth'])
- log.debug(
- 'Found entry (registry={0}, username={1})'
- .format(repr(registry), repr(username))
- )
+ return AuthConfig.parse_auth(entries, raise_on_error)
- conf[registry] = {
- 'username': username,
- 'password': password,
- 'email': entry.get('email'),
- 'serveraddress': registry,
- }
- return conf
-
-def load_config(config_path=None, config_dict=None):
- """
- Loads authentication data from a Docker configuration file in the given
- root directory or if config_path is passed use given path.
- Lookup priority:
- explicit config_path parameter > DOCKER_CONFIG environment variable >
- ~/.docker/config.json > ~/.dockercfg
- """
-
- if not config_dict:
- config_file = config.find_config_file(config_path)
-
- if not config_file:
- return {}
- try:
- with open(config_file) as f:
- config_dict = json.load(f)
- except (IOError, KeyError, ValueError) as e:
- # Likely missing new Docker config file or it's in an
- # unknown format, continue to attempt to read old location
- # and format.
- log.debug(e)
- return _load_legacy_config(config_file)
-
- res = {}
- if config_dict.get('auths'):
- log.debug("Found 'auths' section")
- res.update({
- 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
- })
- if config_dict.get('credsStore'):
- log.debug("Found 'credsStore' section")
- res.update({'credsStore': config_dict.pop('credsStore')})
- if config_dict.get('credHelpers'):
- log.debug("Found 'credHelpers' section")
- res.update({'credHelpers': config_dict.pop('credHelpers')})
- if res:
- return res
-
- log.debug(
- "Couldn't find auth-related section ; attempting to interpret"
- "as auth-only file"
- )
- return {'auths': parse_auth(config_dict)}
+def load_config(config_path=None, config_dict=None, credstore_env=None):
+ return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
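
A short sketch of the new AuthConfig surface; the registry name is
illustrative, and load_config() keeps its previous call signature:

    from docker import auth

    # load_config() now returns an AuthConfig instance (a dict subclass),
    # so existing callers keep working while the new helpers become
    # available.
    cfg = auth.load_config(credstore_env=None)
    if not cfg.is_empty:
        entry = cfg.resolve_authconfig('registry.example.com')
        # Merges plain 'auths' entries with credsStore/credHelpers data.
        all_creds = cfg.get_all_credentials()
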
diff --git a/docker/client.py b/docker/client.py
index 8d4a52b..99ae196 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -26,7 +26,7 @@ class DockerClient(object):
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -62,7 +62,7 @@ class DockerClient(object):
Args:
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
diff --git a/docker/constants.py b/docker/constants.py
index 7565a76..4b96e1c 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -14,7 +14,17 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25
+
+# The OpenSSH server default value for MaxSessions is 10 which means we can
+# use up to 9, leaving the final session for the underlying SSH connection.
+# For more details see: https://github.com/docker/docker-py/issues/2246
+DEFAULT_NUM_POOLS_SSH = 9
+
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
diff --git a/docker/credentials/__init__.py b/docker/credentials/__init__.py
new file mode 100644
index 0000000..31ad28e
--- /dev/null
+++ b/docker/credentials/__init__.py
@@ -0,0 +1,4 @@
+# flake8: noqa
+from .store import Store
+from .errors import StoreError, CredentialsNotFound
+from .constants import *
diff --git a/docker/credentials/constants.py b/docker/credentials/constants.py
new file mode 100644
index 0000000..6a82d8d
--- /dev/null
+++ b/docker/credentials/constants.py
@@ -0,0 +1,4 @@
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'
diff --git a/docker/credentials/errors.py b/docker/credentials/errors.py
new file mode 100644
index 0000000..42a1bc1
--- /dev/null
+++ b/docker/credentials/errors.py
@@ -0,0 +1,25 @@
+class StoreError(RuntimeError):
+ pass
+
+
+class CredentialsNotFound(StoreError):
+ pass
+
+
+class InitializationError(StoreError):
+ pass
+
+
+def process_store_error(cpe, program):
+ message = cpe.output.decode('utf-8')
+ if 'credentials not found in native keychain' in message:
+ return CredentialsNotFound(
+ 'No matching credentials in {}'.format(
+ program
+ )
+ )
+ return StoreError(
+ 'Credentials store {} exited with "{}".'.format(
+ program, cpe.output.decode('utf-8').strip()
+ )
+ )
diff --git a/docker/credentials/store.py b/docker/credentials/store.py
new file mode 100644
index 0000000..0017888
--- /dev/null
+++ b/docker/credentials/store.py
@@ -0,0 +1,107 @@
+import errno
+import json
+import subprocess
+
+import six
+
+from . import constants
+from . import errors
+from .utils import create_environment_dict
+from .utils import find_executable
+
+
+class Store(object):
+ def __init__(self, program, environment=None):
+ """ Create a store object that acts as an interface to
+ perform the basic operations for storing, retrieving
+ and erasing credentials using `program`.
+ """
+ self.program = constants.PROGRAM_PREFIX + program
+ self.exe = find_executable(self.program)
+ self.environment = environment
+ if self.exe is None:
+ raise errors.InitializationError(
+ '{} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+
+ def get(self, server):
+ """ Retrieve credentials for `server`. If no credentials are found,
+ a `StoreError` will be raised.
+ """
+ if not isinstance(server, six.binary_type):
+ server = server.encode('utf-8')
+ data = self._execute('get', server)
+ result = json.loads(data.decode('utf-8'))
+
+        # docker-credential-pass will return an object for nonexistent servers
+ # whereas other helpers will exit with returncode != 0. For
+ # consistency, if no significant data is returned,
+ # raise CredentialsNotFound
+ if result['Username'] == '' and result['Secret'] == '':
+ raise errors.CredentialsNotFound(
+ 'No matching credentials in {}'.format(self.program)
+ )
+
+ return result
+
+ def store(self, server, username, secret):
+ """ Store credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ data_input = json.dumps({
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret
+ }).encode('utf-8')
+ return self._execute('store', data_input)
+
+ def erase(self, server):
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ if not isinstance(server, six.binary_type):
+ server = server.encode('utf-8')
+ self._execute('erase', server)
+
+ def list(self):
+ """ List stored credentials. Requires v0.4.0+ of the helper.
+ """
+ data = self._execute('list', None)
+ return json.loads(data.decode('utf-8'))
+
+ def _execute(self, subcmd, data_input):
+ output = None
+ env = create_environment_dict(self.environment)
+ try:
+ if six.PY3:
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
+ else:
+ process = subprocess.Popen(
+ [self.exe, subcmd], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, env=env,
+ )
+ output, _ = process.communicate(data_input)
+ if process.returncode != 0:
+ raise subprocess.CalledProcessError(
+ returncode=process.returncode, cmd='', output=output
+ )
+ except subprocess.CalledProcessError as e:
+ raise errors.process_store_error(e, self.program)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise errors.StoreError(
+ '{} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+ else:
+ raise errors.StoreError(
+ 'Unexpected OS error "{}", errno={}'.format(
+ e.strerror, e.errno
+ )
+ )
+ return output
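
Usage sketch for the vendored store, assuming a docker-credential-secretservice
helper is installed on PATH (any docker-credential-* helper behaves the same):

    from docker.credentials import CredentialsNotFound, Store, StoreError

    store = Store('secretservice')  # resolves docker-credential-secretservice
    try:
        creds = store.get('https://index.docker.io/v1/')
        print(creds['Username'])
    except CredentialsNotFound:
        print('no stored credentials for this registry')
    except StoreError as e:
        print('credential helper failed:', e)
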
diff --git a/docker/credentials/utils.py b/docker/credentials/utils.py
new file mode 100644
index 0000000..3f720ef
--- /dev/null
+++ b/docker/credentials/utils.py
@@ -0,0 +1,38 @@
+import distutils.spawn
+import os
+import sys
+
+
+def find_executable(executable, path=None):
+ """
+    Like distutils.spawn.find_executable, but on Windows, look up
+    every extension declared in PATHEXT instead of just `.exe`
+ """
+ if sys.platform != 'win32':
+ return distutils.spawn.find_executable(executable, path)
+
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ for ext in extensions:
+ f = os.path.join(p, base + ext)
+ if os.path.isfile(f):
+ return f
+ return None
+ else:
+ return executable
+
+
+def create_environment_dict(overrides):
+ """
+ Create and return a copy of os.environ with the specified overrides
+ """
+ result = os.environ.copy()
+ result.update(overrides or {})
+ return result
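
A quick illustration of the two helpers; the executable name is illustrative
and find_executable() simply returns None when it is absent:

    from docker.credentials.utils import create_environment_dict, find_executable

    # On Windows every extension in PATHEXT is tried; elsewhere this
    # defers to distutils.spawn.find_executable.
    exe = find_executable('docker-credential-secretservice')

    # Overrides are applied to a copy; os.environ itself is not mutated.
    env = create_environment_dict({'LANG': 'C'})
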
diff --git a/docker/errors.py b/docker/errors.py
index 0253695..c340dcb 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -63,6 +63,9 @@ class APIError(requests.exceptions.HTTPError, DockerException):
if self.response is not None:
return self.response.status_code
+ def is_error(self):
+ return self.is_client_error() or self.is_server_error()
+
def is_client_error(self):
if self.status_code is None:
return False
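
A sketch of how the new is_error() helper composes with the existing
predicates (the container name is illustrative):

    import docker
    from docker.errors import APIError

    client = docker.from_env()
    try:
        client.api.remove_container('no-such-container')
    except APIError as e:
        # is_error() is True for any 4xx or 5xx status code.
        if e.is_error():
            kind = 'client' if e.is_client_error() else 'server'
            print('{} error: {}'.format(kind, e.explanation))
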
diff --git a/docker/models/containers.py b/docker/models/containers.py
index b33a718..d1f275f 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -15,7 +15,12 @@ from .resource import Collection, Model
class Container(Model):
-
+ """ Local representation of a container object. Detailed configuration may
+ be accessed through the :py:attr:`attrs` attribute. Note that local
+ attributes are cached; users may call :py:meth:`reload` to
+ query the Docker daemon for the current properties, causing
+ :py:attr:`attrs` to be refreshed.
+ """
@property
def name(self):
"""
@@ -57,6 +62,13 @@ class Container(Model):
return self.attrs['State']['Status']
return self.attrs['State']
+ @property
+ def ports(self):
+ """
+ The ports that the container exposes as a dictionary.
+ """
+ return self.attrs.get('NetworkSettings', {}).get('Ports', {})
+
def attach(self, **kwargs):
"""
Attach to this container.
@@ -139,7 +151,7 @@ class Container(Model):
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
- socket=False, environment=None, workdir=None):
+ socket=False, environment=None, workdir=None, demux=False):
"""
Run a command inside this container. Similar to
``docker exec``.
@@ -161,16 +173,18 @@ class Container(Model):
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
+ demux (bool): Return stdout and stderr separately
Returns:
(ExecResult): A tuple of (exit_code, output)
exit_code: (int):
Exit code for the executed command or ``None`` if
- either ``stream```or ``socket`` is ``True``.
- output: (generator or str):
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator, bytes, or tuple):
If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection.
- A string containing response data otherwise.
+ If ``demux=True``, a tuple of two bytes: stdout and stderr.
+ A bytestring containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
@@ -179,10 +193,11 @@ class Container(Model):
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment,
- workdir=workdir
+ workdir=workdir,
)
exec_output = self.client.api.exec_start(
- resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
+ demux=demux
)
if socket or stream:
return ExecResult(None, exec_output)
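
Sketch of the new demux flag from the model layer, assuming an alpine image
is available locally or can be pulled:

    import docker

    client = docker.from_env()
    container = client.containers.run('alpine', 'sleep 60', detach=True)

    # With demux=True the output is a (stdout, stderr) tuple of bytes;
    # a stream that produced no data comes back as None.
    exit_code, (out, err) = container.exec_run(
        'sh -c "echo ok; echo oops >&2"', demux=True
    )
    print(exit_code, out, err)  # 0 b'ok\n' b'oops\n'
    container.remove(force=True)
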
@@ -228,6 +243,17 @@ class Container(Model):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+
+ Example:
+
+ >>> f = open('./sh_bin.tar', 'wb')
+ >>> bits, stat = container.get_archive('/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
"""
return self.client.api.get_archive(self.id, path, chunk_size)
@@ -253,16 +279,16 @@ class Container(Model):
generator you can iterate over to retrieve log output as it happens.
Args:
- stdout (bool): Get ``STDOUT``
- stderr (bool): Get ``STDERR``
- stream (bool): Stream the response
- timestamps (bool): Show timestamps
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
- follow (bool): Follow log output
+ follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
@@ -380,7 +406,8 @@ class Container(Model):
Args:
decode (bool): If set to true, stream will be decoded into dicts
- on the fly. False by default.
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
@@ -521,12 +548,15 @@ class ContainerCollection(Collection):
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
+ cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
+ cpu_rt_period (int): Limit CPU real-time period in microseconds.
+ cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
@@ -558,7 +588,7 @@ class ContainerCollection(Collection):
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
- extra_hosts (dict): Addtional hostnames to resolve inside the
+ extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
@@ -570,19 +600,17 @@ class ContainerCollection(Collection):
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
+ kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
- links (dict or list of tuples): Either a dictionary mapping name
- to alias or as a list of ``(name, alias)`` tuples.
- log_config (dict): Logging configuration, as a dictionary with
- keys:
-
- - ``type`` The logging driver name.
- - ``config`` A dictionary of configuration for the logging
- driver.
-
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration.
+ lxc_conf (dict): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
@@ -590,6 +618,7 @@ class ContainerCollection(Collection):
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
+ mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -628,8 +657,8 @@ class ContainerCollection(Collection):
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
- ``port/protocol``, where the protocol is either ``tcp`` or
- ``udp``.
+ ``port/protocol``, where the protocol is either ``tcp``,
+ ``udp``, or ``sctp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
@@ -662,6 +691,7 @@ class ContainerCollection(Collection):
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
+ runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
@@ -691,13 +721,21 @@ class ContainerCollection(Collection):
}
tty (bool): Allocate a pseudo-TTY.
- ulimits (:py:class:`list`): Ulimits to set inside the container, as
- a list of dicts.
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of :py:class:`docker.types.Ulimit` instances.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+                variables will be set in the container being created.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
@@ -717,7 +755,6 @@ class ContainerCollection(Collection):
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
- runtime (str): Runtime to use with this container.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
@@ -863,7 +900,8 @@ class ContainerCollection(Collection):
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
@@ -932,8 +970,8 @@ RUN_CREATE_KWARGS = [
'stdin_open',
'stop_signal',
'tty',
+ 'use_config_proxy',
'user',
- 'volume_driver',
'working_dir',
]
@@ -995,7 +1033,9 @@ RUN_HOST_CONFIG_KWARGS = [
'tmpfs',
'ulimits',
'userns_mode',
+ 'uts_mode',
'version',
+ 'volume_driver',
'volumes_from',
'runtime'
]
diff --git a/docker/models/images.py b/docker/models/images.py
index 41632c6..757a5a4 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -1,5 +1,6 @@
import itertools
import re
+import warnings
import six
@@ -59,14 +60,20 @@ class Image(Model):
"""
return self.client.api.history(self.id)
- def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
- chunk_size (int): The number of bytes returned by each iteration
- of the generator. If ``None``, data will be streamed as it is
- received. Default: 2 MB
+ chunk_size (int): The generator will return up to that much data
+ per iteration, but may return less. If ``None``, data will be
+ streamed as it is received. Default: 2 MB
+ named (str or bool): If ``False`` (default), the tarball will not
+ retain repository and tag information for this image. If set
+ to ``True``, the first tag in the :py:attr:`~tags` list will
+ be used to identify the image. Alternatively, any element of
+ the :py:attr:`~tags` list can be used as an argument to use
+ that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
@@ -78,12 +85,22 @@ class Image(Model):
Example:
>>> image = cli.get_image("busybox:latest")
- >>> f = open('/tmp/busybox-latest.tar', 'w')
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
"""
- return self.client.api.get_image(self.id, chunk_size)
+ img = self.id
+ if named:
+ img = self.tags[0] if self.tags else img
+ if isinstance(named, six.string_types):
+ if named not in self.tags:
+ raise InvalidArgument(
+ "{} is not a valid tag for this image".format(named)
+ )
+ img = named
+
+ return self.client.api.get_image(img, chunk_size)
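
Sketch of the new named flag; it assumes busybox:latest is present locally:

    import docker

    client = docker.from_env()
    image = client.images.get('busybox:latest')

    # named=True embeds the first tag in the tarball, so a later
    # `docker load` restores the repository and tag information.
    with open('/tmp/busybox-latest.tar', 'wb') as f:
        for chunk in image.save(named=True):
            f.write(chunk)
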
def tag(self, repository, tag=None, **kwargs):
"""
@@ -241,6 +258,10 @@ class ImageCollection(Collection):
platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build.
Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
Returns:
(tuple): The first item is the :py:class:`Image` object for the
@@ -294,22 +315,26 @@ class ImageCollection(Collection):
"""
return self.prepare_model(self.client.api.inspect_image(name))
- def get_registry_data(self, name):
+ def get_registry_data(self, name, auth_config=None):
"""
Gets the registry data for an image.
Args:
name (str): The name of the image.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
Returns:
(:py:class:`RegistryData`): The data object.
+
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return RegistryData(
image_name=name,
- attrs=self.client.api.inspect_distribution(name),
+ attrs=self.client.api.inspect_distribution(name, auth_config),
client=self.client,
collection=self,
)
@@ -325,7 +350,8 @@ class ImageCollection(Collection):
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- - ``label`` (str): format either ``key`` or ``key=value``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
Returns:
(list of :py:class:`Image`): The images.
@@ -383,10 +409,9 @@ class ImageCollection(Collection):
Args:
repository (str): The repository to pull
tag (str): The tag to pull
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.client.DockerClient.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
Returns:
@@ -409,7 +434,21 @@ class ImageCollection(Collection):
if not tag:
repository, tag = parse_repository_tag(repository)
- self.client.api.pull(repository, tag=tag, **kwargs)
+ if 'stream' in kwargs:
+ warnings.warn(
+ '`stream` is not a valid parameter for this method'
+ ' and will be overridden'
+ )
+ del kwargs['stream']
+
+ pull_log = self.client.api.pull(
+ repository, tag=tag, stream=True, **kwargs
+ )
+ for _ in pull_log:
+ # We don't do anything with the logs, but we need
+ # to keep the connection alive and wait for the image
+ # to be pulled.
+ pass
if tag:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
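
Calling code stays the same: pull() now forces a streaming request internally
and drains the response so that long pulls do not drop the connection. For
example (image and tag are illustrative):

    import docker

    client = docker.from_env()
    # Passing stream= explicitly now emits a warning and is overridden.
    image = client.images.pull('alpine', tag='3.10')
    print(image.tags)
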
diff --git a/docker/models/networks.py b/docker/models/networks.py
index be3291a..f944c8e 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -190,7 +190,8 @@ class NetworkCollection(Collection):
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
diff --git a/docker/models/services.py b/docker/models/services.py
index 458d2c8..a35687b 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -1,6 +1,6 @@
import copy
from docker.errors import create_unexpected_kwargs_error, InvalidArgument
-from docker.types import TaskTemplate, ContainerSpec, ServiceMode
+from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode
from .resource import Model, Collection
@@ -42,7 +42,7 @@ class Service(Model):
``label``, and ``desired-state``.
Returns:
- (:py:class:`list`): List of task dictionaries.
+ :py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
@@ -84,26 +84,27 @@ class Service(Model):
def logs(self, **kwargs):
"""
- Get log stream for the service.
- Note: This method works only for services with the ``json-file``
- or ``journald`` logging drivers.
-
- Args:
- details (bool): Show extra details provided to logs.
- Default: ``False``
- follow (bool): Keep connection open to read logs as they are
- sent by the Engine. Default: ``False``
- stdout (bool): Return logs from ``stdout``. Default: ``False``
- stderr (bool): Return logs from ``stderr``. Default: ``False``
- since (int): UNIX timestamp for the logs staring point.
- Default: 0
- timestamps (bool): Add timestamps to every log line.
- tail (string or int): Number of log lines to be returned,
- counting from the current end of the logs. Specify an
- integer or ``'all'`` to output all log lines.
- Default: ``all``
-
- Returns (generator): Logs for the service.
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+            since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ generator: Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
@@ -118,7 +119,7 @@ class Service(Model):
replicas (int): The number of containers that should be running.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
if 'Global' in self.attrs['Spec']['Mode'].keys():
@@ -134,7 +135,7 @@ class Service(Model):
Force update the service even if no changes require it.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
return self.update(force_update=True, fetch_current_spec=True)
@@ -152,13 +153,20 @@ class ServiceCollection(Collection):
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
- constraints (list of str): Placement constraints.
+ constraints (list of str): :py:class:`~docker.types.Placement`
+ constraints.
+ preferences (list of tuple): :py:class:`~docker.types.Placement`
+ preferences.
+ platforms (list of tuple): A list of platform constraints
+ expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
+ init (boolean): Run an init inside the container that forwards
+ signals and reaps processes
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
labels (dict): Labels to apply to the service.
@@ -170,16 +178,19 @@ class ServiceCollection(Collection):
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
- networks (list of str): List of network names or IDs to attach
- the service to. Default: ``None``.
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
- secrets (list of :py:class:`docker.types.SecretReference`): List
+ secrets (list of :py:class:`~docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
@@ -195,13 +206,14 @@ class ServiceCollection(Collection):
the container's `hosts` file.
dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file.
- configs (:py:class:`list`): List of :py:class:`ConfigReference`
- that will be exposed to the service.
+ configs (:py:class:`list`): List of
+ :py:class:`~docker.types.ConfigReference` that will be exposed
+ to the service.
privileges (Privileges): Security options for the service's
containers.
Returns:
- (:py:class:`Service`) The created service.
+ :py:class:`Service`: The created service.
Raises:
:py:class:`docker.errors.APIError`
@@ -223,7 +235,7 @@ class ServiceCollection(Collection):
into the output.
Returns:
- (:py:class:`Service`): The service.
+ :py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
@@ -248,7 +260,7 @@ class ServiceCollection(Collection):
Default: ``None``.
Returns:
- (list of :py:class:`Service`): The services.
+ list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
@@ -272,6 +284,7 @@ CONTAINER_SPEC_KWARGS = [
'hostname',
'hosts',
'image',
+ 'init',
'isolation',
'labels',
'mounts',
@@ -302,6 +315,12 @@ CREATE_SERVICE_KWARGS = [
'endpoint_spec',
]
+PLACEMENT_KWARGS = [
+ 'constraints',
+ 'preferences',
+ 'platforms',
+]
+
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
@@ -321,10 +340,12 @@ def _get_create_service_kwargs(func_name, kwargs):
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
- if 'constraints' in kwargs:
- task_template_kwargs['placement'] = {
- 'Constraints': kwargs.pop('constraints')
- }
+ placement = {}
+ for key in copy.copy(kwargs):
+ if key in PLACEMENT_KWARGS:
+ placement[key] = kwargs.pop(key)
+ placement = Placement(**placement)
+ task_template_kwargs['placement'] = placement
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
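
From the model layer the three placement-related kwargs now travel together
into a single docker.types.Placement; a sketch assuming a swarm with at least
one worker node and a pre-existing 'backend' network:

    import docker

    client = docker.from_env()
    service = client.services.create(
        image='nginx:alpine',
        name='web',
        constraints=['node.role == worker'],
        preferences=[('spread', 'node.labels.zone')],
        platforms=[('x86_64', 'linux')],
        networks=['backend'],
    )
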
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index 7396e73..755c17d 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -34,7 +34,8 @@ class Swarm(Model):
get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, **kwargs):
+ force_new_cluster=False, default_addr_pool=None,
+ subnet_size=None, data_path_addr=None, **kwargs):
"""
Initialize a new swarm on this Engine.
@@ -56,6 +57,14 @@ class Swarm(Model):
is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
+ default_addr_pool (list of str): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
task_history_retention_limit (int): Maximum number of tasks
history stored.
snapshot_interval (int): Number of logs entries between snapshot.
@@ -89,7 +98,7 @@ class Swarm(Model):
created in the orchestrator.
Returns:
- ``True`` if the request went through.
+ (str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
@@ -99,7 +108,8 @@ class Swarm(Model):
>>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
- force_new_cluster=False, snapshot_interval=5000,
+            force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
+ subnet_size=24, snapshot_interval=5000,
log_entries_for_slow_followers=1200
)
@@ -107,11 +117,15 @@ class Swarm(Model):
init_kwargs = {
'advertise_addr': advertise_addr,
'listen_addr': listen_addr,
- 'force_new_cluster': force_new_cluster
+ 'force_new_cluster': force_new_cluster,
+ 'default_addr_pool': default_addr_pool,
+ 'subnet_size': subnet_size,
+ 'data_path_addr': data_path_addr,
}
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
- self.client.api.init_swarm(**init_kwargs)
+ node_id = self.client.api.init_swarm(**init_kwargs)
self.reload()
+ return node_id
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
@@ -137,7 +151,7 @@ class Swarm(Model):
unlock.__doc__ = APIClient.unlock_swarm.__doc__
def update(self, rotate_worker_token=False, rotate_manager_token=False,
- **kwargs):
+ rotate_manager_unlock_key=False, **kwargs):
"""
Update the swarm's configuration.
@@ -150,7 +164,8 @@ class Swarm(Model):
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
-
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
@@ -164,5 +179,6 @@ class Swarm(Model):
version=self.version,
swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token,
- rotate_manager_token=rotate_manager_token
+ rotate_manager_token=rotate_manager_token,
+ rotate_manager_unlock_key=rotate_manager_unlock_key
)
diff --git a/docker/tls.py b/docker/tls.py
index 4900e9f..d4671d1 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -2,7 +2,7 @@ import os
import ssl
from . import errors
-from .transport import SSLAdapter
+from .transport import SSLHTTPAdapter
class TLSConfig(object):
@@ -105,7 +105,7 @@ class TLSConfig(object):
if self.cert:
client.cert = self.cert
- client.mount('https://', SSLAdapter(
+ client.mount('https://', SSLHTTPAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
index abbee18..e37fc3b 100644
--- a/docker/transport/__init__.py
+++ b/docker/transport/__init__.py
@@ -1,8 +1,13 @@
# flake8: noqa
-from .unixconn import UnixAdapter
-from .ssladapter import SSLAdapter
+from .unixconn import UnixHTTPAdapter
+from .ssladapter import SSLHTTPAdapter
try:
- from .npipeconn import NpipeAdapter
+ from .npipeconn import NpipeHTTPAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
+
+try:
+ from .sshconn import SSHHTTPAdapter
+except ImportError:
+ pass
diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py
new file mode 100644
index 0000000..4d819b6
--- /dev/null
+++ b/docker/transport/basehttpadapter.py
@@ -0,0 +1,8 @@
+import requests.adapters
+
+
+class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
+ def close(self):
+ super(BaseHTTPAdapter, self).close()
+ if hasattr(self, 'pools'):
+ self.pools.clear()
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index ab9b904..aa05538 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -1,6 +1,7 @@
import six
import requests.adapters
+from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
@@ -68,7 +69,7 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
return conn or self._new_conn()
-class NpipeAdapter(requests.adapters.HTTPAdapter):
+class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
@@ -81,7 +82,7 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(NpipeAdapter, self).__init__()
+ super(NpipeHTTPAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -103,6 +104,3 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
-
- def close(self):
- self.pools.clear()
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index c04b39d..ef02031 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -87,10 +87,6 @@ class NpipeSocket(object):
def dup(self):
return NpipeSocket(self._handle)
- @check_closed
- def fileno(self):
- return int(self._handle)
-
def getpeername(self):
return self._address
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py
new file mode 100644
index 0000000..5a8ceb0
--- /dev/null
+++ b/docker/transport/sshconn.py
@@ -0,0 +1,116 @@
+import paramiko
+import requests.adapters
+import six
+
+from docker.transport.basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+if six.PY3:
+ import http.client as httplib
+else:
+ import httplib
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class SSHConnection(httplib.HTTPConnection, object):
+ def __init__(self, ssh_transport, timeout=60):
+ super(SSHConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.ssh_transport = ssh_transport
+ self.timeout = timeout
+
+ def connect(self):
+ sock = self.ssh_transport.open_session()
+ sock.settimeout(self.timeout)
+ sock.exec_command('docker system dial-stdio')
+ self.sock = sock
+
+
+class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ scheme = 'ssh'
+
+ def __init__(self, ssh_client, timeout=60, maxsize=10):
+ super(SSHConnectionPool, self).__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.ssh_transport = ssh_client.get_transport()
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return SSHConnection(self.ssh_transport, self.timeout)
+
+ # When re-using connections, urllib3 calls fileno() on our
+ # SSH channel instance, quickly overloading our fd limit. To avoid this,
+ # we override _get_conn
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+
+ except six.moves.queue.Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ )
+ pass # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class SSHHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
+ 'pools', 'timeout', 'ssh_client',
+ ]
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS):
+ self.ssh_client = paramiko.SSHClient()
+ self.ssh_client.load_system_host_keys()
+
+ self.base_url = base_url
+ self._connect()
+ self.timeout = timeout
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super(SSHHTTPAdapter, self).__init__()
+
+ def _connect(self):
+ parsed = six.moves.urllib_parse.urlparse(self.base_url)
+ self.ssh_client.connect(
+ parsed.hostname, parsed.port, parsed.username,
+ )
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ # Connection is closed; try a reconnect
+ if not self.ssh_client.get_transport():
+ self._connect()
+
+ pool = SSHConnectionPool(
+ self.ssh_client, self.timeout
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def close(self):
+ super(SSHHTTPAdapter, self).close()
+ self.ssh_client.close()
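A minimal usage sketch for the new SSH transport; the user and host are
placeholders, and it assumes paramiko is installed, key-based SSH auth is set
up, and the Docker CLI exists on the remote side, since the channel runs
`docker system dial-stdio`:

    import docker

    # The port defaults to 22 when omitted (see parse_host() further down).
    client = docker.DockerClient(base_url='ssh://user@remote-host')
    print(client.version()['Version'])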
diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py
index 8fafec3..12de76c 100644
--- a/docker/transport/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -7,6 +7,8 @@ import sys
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
+from docker.transport.basehttpadapter import BaseHTTPAdapter
+
try:
import requests.packages.urllib3 as urllib3
except ImportError:
@@ -22,7 +24,7 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5:
urllib3.connection.match_hostname = match_hostname
-class SSLAdapter(HTTPAdapter):
+class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
@@ -34,7 +36,7 @@ class SSLAdapter(HTTPAdapter):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- super(SSLAdapter, self).__init__(**kwargs)
+ super(SSLHTTPAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
@@ -57,7 +59,7 @@ class SSLAdapter(HTTPAdapter):
But we still need to take care of when there is a proxy poolmanager
"""
- conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
+ conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index c59821a..b619103 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -3,6 +3,7 @@ import requests.adapters
import socket
from six.moves import http_client as httplib
+from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
try:
@@ -69,7 +70,7 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
)
-class UnixAdapter(requests.adapters.HTTPAdapter):
+class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
@@ -85,7 +86,7 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(UnixAdapter, self).__init__()
+ super(UnixHTTPAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -107,6 +108,3 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
-
- def close(self):
- self.pools.clear()
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 0b0d847..5db330e 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -5,7 +5,8 @@ from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
- Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
- ServiceMode, TaskTemplate, UpdateConfig
+ Mount, Placement, PlacementPreference, Privileges, Resources,
+ RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate,
+ UpdateConfig, NetworkAttachmentConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 2521420..fd8cab4 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -23,6 +23,35 @@ class LogConfigTypesEnum(object):
class LogConfig(DictType):
+ """
+ Configure logging for a container, when provided as an argument to
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+ You may refer to the
+ `official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
+ for more information.
+
+ Args:
+ type (str): Indicate which log driver to use. A set of valid drivers
+ is provided as part of the :py:attr:`LogConfig.types`
+ enum. Other values may be accepted depending on the engine version
+ and available logging plugins.
+ config (dict): A driver-dependent configuration dictionary. Please
+ refer to the driver's documentation for a list of valid config
+ keys.
+
+ Example:
+
+ >>> from docker.types import LogConfig
+ >>> lc = LogConfig(type=LogConfig.types.JSON, config={
+ ... 'max-size': '1g',
+ ... 'labels': 'production_status,geo'
+ ... })
+ >>> hc = client.create_host_config(log_config=lc)
+ >>> container = client.create_container('busybox', 'true',
+ ... host_config=hc)
+ >>> client.inspect_container(container)['HostConfig']['LogConfig']
+ {'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}}
+ """ # noqa: E501
types = LogConfigTypesEnum
def __init__(self, **kwargs):
@@ -50,14 +79,40 @@ class LogConfig(DictType):
return self['Config']
def set_config_value(self, key, value):
+ """ Set a the value for ``key`` to ``value`` inside the ``config``
+ dict.
+ """
self.config[key] = value
def unset_config(self, key):
+ """ Remove the ``key`` property from the ``config`` dict. """
if key in self.config:
del self.config[key]
class Ulimit(DictType):
+ """
+ Create a ulimit declaration to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+ name (str): Which ulimit will this apply to. A list of valid names can
+ be found `here <http://tinyurl.me/ZWRkM2Ztwlykf>`_.
+ soft (int): The soft limit for this ulimit. Optional.
+ hard (int): The hard limit for this ulimit. Optional.
+
+ Example:
+
+ >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
+ >>> hc = client.create_host_config(ulimits=[nproc_limit])
+ >>> container = client.create_container(
+ 'busybox', 'true', host_config=hc
+ )
+ >>> client.inspect_container(container)['HostConfig']['Ulimits']
+ [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
+
+ """
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
@@ -115,11 +170,11 @@ class HostConfig(dict):
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
- cpuset_cpus=None, userns_mode=None, pids_limit=None,
- isolation=None, auto_remove=False, storage_opt=None,
- init=None, init_path=None, volume_driver=None,
- cpu_count=None, cpu_percent=None, nano_cpus=None,
- cpuset_mems=None, runtime=None, mounts=None,
+ cpuset_cpus=None, userns_mode=None, uts_mode=None,
+ pids_limit=None, isolation=None, auto_remove=False,
+ storage_opt=None, init=None, init_path=None,
+ volume_driver=None, cpu_count=None, cpu_percent=None,
+ nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None):
@@ -264,10 +319,10 @@ class HostConfig(dict):
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
- for l in ulimits:
- if not isinstance(l, Ulimit):
- l = Ulimit(**l)
- self['Ulimits'].append(l)
+ for lmt in ulimits:
+ if not isinstance(lmt, Ulimit):
+ lmt = Ulimit(**lmt)
+ self['Ulimits'].append(lmt)
if log_config is not None:
if not isinstance(log_config, LogConfig):
@@ -392,6 +447,11 @@ class HostConfig(dict):
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
+ if uts_mode:
+ if uts_mode != "host":
+ raise host_config_value_error("uts_mode", uts_mode)
+ self['UTSMode'] = uts_mode
+
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
@@ -573,7 +633,7 @@ class ContainerConfig(dict):
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
- 'User': six.text_type(user) if user else None,
+ 'User': six.text_type(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
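The new uts_mode option only accepts the value 'host'; a quick sketch of
passing it through a host config (assumes a local daemon and a pulled
busybox image):

    import docker

    client = docker.APIClient()
    hc = client.create_host_config(uts_mode='host')  # any other value raises
    ctnr = client.create_container('busybox', 'true', host_config=hc)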
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
index ee8624e..af3e5bc 100644
--- a/docker/types/daemon.py
+++ b/docker/types/daemon.py
@@ -5,6 +5,8 @@ try:
except ImportError:
import urllib3
+from ..errors import DockerException
+
class CancellableStream(object):
"""
@@ -13,7 +15,7 @@ class CancellableStream(object):
Example:
>>> events = client.events()
>>> for event in events:
- ... print event
+ ... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
@@ -55,9 +57,17 @@ class CancellableStream(object):
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
+ elif hasattr(sock_fp, 'channel'):
+ # We're working with a paramiko (SSH) channel, which doesn't
+ # support cancellable streams with the current implementation
+ raise DockerException(
+ 'Cancellable streams not supported for the SSH protocol'
+ )
else:
sock = sock_fp._sock
- if isinstance(sock, urllib3.contrib.pyopenssl.WrappedSocket):
+
+ if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
+ sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
index 61857c2..9815018 100644
--- a/docker/types/healthcheck.py
+++ b/docker/types/healthcheck.py
@@ -14,7 +14,7 @@ class Healthcheck(DictType):
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- - ``["CMD-SHELL", command]``: RUn command in the system's
+ - ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
@@ -23,9 +23,9 @@ class Healthcheck(DictType):
should be 0 or at least 1000000 (1 ms).
timeout (int): The time to wait before considering the check to
have hung. It should be 0 or at least 1000000 (1 ms).
- retries (integer): The number of consecutive failures needed to
+ retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
- start_period (integer): Start period for the container to
+ start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
"""
@@ -53,6 +53,8 @@ class Healthcheck(DictType):
@test.setter
def test(self, value):
+ if isinstance(value, six.string_types):
+ value = ["CMD-SHELL", value]
self['Test'] = value
@property
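With the new setter, assigning a plain string wraps it in ``CMD-SHELL`` form,
matching the docstring above; a small sketch:

    from docker.types import Healthcheck

    hc = Healthcheck(test=['NONE'])
    hc.test = 'curl -f http://localhost/ || exit 1'
    assert hc['Test'] == ['CMD-SHELL', 'curl -f http://localhost/ || exit 1']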
diff --git a/docker/types/services.py b/docker/types/services.py
index 31f4750..05dda15 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -26,8 +26,8 @@ class TaskTemplate(dict):
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
- networks (:py:class:`list`): List of network names or IDs to attach
- the containers to.
+        networks (:py:class:`list`): List of network names, IDs, or
+            :py:class:`NetworkAttachmentConfig` objects to attach the service to.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
"""
@@ -110,13 +110,15 @@ class ContainerSpec(dict):
privileges (Privileges): Security options for the service's containers.
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
+ init (boolean): Run an init inside the container that forwards signals
+ and reaps processes.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
- privileges=None, isolation=None):
+ privileges=None, isolation=None, init=None):
self['Image'] = image
if isinstance(command, six.string_types):
@@ -183,6 +185,9 @@ class ContainerSpec(dict):
if isolation is not None:
self['Isolation'] = isolation
+ if init is not None:
+ self['Init'] = init
+
class Mount(dict):
"""
@@ -368,10 +373,11 @@ class UpdateConfig(dict):
parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0.
- delay (int): Amount of time between updates.
+ delay (int): Amount of time between updates, in nanoseconds.
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
- ``continue`` and ``pause``. Default: ``continue``
+ ``continue``, ``pause``, as well as ``rollback`` since API v1.28.
+ Default: ``continue``
monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
@@ -385,9 +391,9 @@ class UpdateConfig(dict):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
- if failure_action not in ('pause', 'continue'):
+ if failure_action not in ('pause', 'continue', 'rollback'):
raise errors.InvalidArgument(
- 'failure_action must be either `pause` or `continue`.'
+ 'failure_action must be one of `pause`, `continue`, `rollback`'
)
self['FailureAction'] = failure_action
@@ -413,6 +419,30 @@ class UpdateConfig(dict):
self['Order'] = order
+class RollbackConfig(UpdateConfig):
+ """
+    Used to specify the way container rollbacks should be performed by a service.
+
+ Args:
+ parallelism (int): Maximum number of tasks to be rolled back in one
+ iteration (0 means unlimited parallelism). Default: 0
+ delay (int): Amount of time between rollbacks, in nanoseconds.
+ failure_action (string): Action to take if a rolled back task fails to
+ run, or stops running during the rollback. Acceptable values are
+ ``continue``, ``pause`` or ``rollback``.
+ Default: ``continue``
+ monitor (int): Amount of time to monitor each rolled back task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ a rollback before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+ order (string): Specifies the order of operations when rolling out a
+ rolled back task. Either ``start_first`` or ``stop_first`` are
+ accepted.
+ """
+ pass
+
+
class RestartConditionTypesEnum(object):
_values = (
'none',
@@ -623,18 +653,24 @@ class Placement(dict):
Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args:
- constraints (:py:class:`list`): A list of constraints
- preferences (:py:class:`list`): Preferences provide a way to make
- the scheduler aware of factors such as topology. They are
- provided in order from highest to lowest precedence.
- platforms (:py:class:`list`): A list of platforms expressed as
- ``(arch, os)`` tuples
+ constraints (:py:class:`list` of str): A list of constraints
+ preferences (:py:class:`list` of tuple): Preferences provide a way
+ to make the scheduler aware of factors such as topology. They
+ are provided in order from highest to lowest precedence and
+ are expressed as ``(strategy, descriptor)`` tuples. See
+ :py:class:`PlacementPreference` for details.
+ platforms (:py:class:`list` of tuple): A list of platforms
+ expressed as ``(arch, os)`` tuples
"""
def __init__(self, constraints=None, preferences=None, platforms=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
- self['Preferences'] = preferences
+ self['Preferences'] = []
+ for pref in preferences:
+ if isinstance(pref, tuple):
+ pref = PlacementPreference(*pref)
+ self['Preferences'].append(pref)
if platforms:
self['Platforms'] = []
for plat in platforms:
@@ -643,6 +679,27 @@ class Placement(dict):
})
+class PlacementPreference(dict):
+ """
+ Placement preference to be used as an element in the list of
+ preferences for :py:class:`Placement` objects.
+
+ Args:
+ strategy (string): The placement strategy to implement. Currently,
+ the only supported strategy is ``spread``.
+ descriptor (string): A label descriptor. For the spread strategy,
+ the scheduler will try to spread tasks evenly over groups of
+ nodes identified by this label.
+ """
+ def __init__(self, strategy, descriptor):
+ if strategy != 'spread':
+ raise errors.InvalidArgument(
+ 'PlacementPreference strategy value is invalid ({}):'
+ ' must be "spread".'.format(strategy)
+ )
+ self['Spread'] = {'SpreadDescriptor': descriptor}
+
+
class DNSConfig(dict):
"""
Specification for DNS related configurations in resolver configuration
@@ -662,7 +719,7 @@ class DNSConfig(dict):
class Privileges(dict):
- """
+ r"""
Security options for a service's containers.
Part of a :py:class:`ContainerSpec` definition.
@@ -713,3 +770,21 @@ class Privileges(dict):
if len(selinux_context) > 0:
self['SELinuxContext'] = selinux_context
+
+
+class NetworkAttachmentConfig(dict):
+ """
+ Network attachment options for a service.
+
+ Args:
+ target (str): The target network for attachment.
+ Can be a network name or ID.
+ aliases (:py:class:`list`): A list of discoverable alternate names
+ for the service.
+ options (:py:class:`dict`): Driver attachment options for the
+ network target.
+ """
+ def __init__(self, target, aliases=None, options=None):
+ self['Target'] = target
+ self['Aliases'] = aliases
+ self['DriverOpts'] = options
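Taken together, placement preferences may now be passed as bare
``(strategy, descriptor)`` tuples, and service networks as rich attachment
configs; a short sketch with placeholder names:

    from docker.types import NetworkAttachmentConfig, Placement

    # The tuple is coerced into a PlacementPreference by Placement.__init__
    placement = Placement(
        constraints=['node.role == worker'],
        preferences=[('spread', 'node.labels.datacenter')],
    )
    network = NetworkAttachmentConfig('my-overlay', aliases=['db'])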
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index bf7d697..a50cc02 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -3,11 +3,11 @@ import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
- "((?P<host>[a-fA-F\d.:]+):)?" # Address
- "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ r"((?P<host>[a-fA-F\d.:]+):)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
- "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
- "(?P<proto>/(udp|tcp))?" # Protocol
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
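With the raw strings and the sctp alternative in place, the pattern parses
specs such as the following (expected results shown as comments):

    from docker.utils.ports import PORT_SPEC

    m = PORT_SPEC.match('127.0.0.1:8080:80/tcp')
    (m.group('host'), m.group('ext'), m.group('int'), m.group('proto'))
    # ('127.0.0.1', '8080', '80', '/tcp')
    PORT_SPEC.match('3000-3005/sctp').group('proto')  # '/sctp'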
diff --git a/docker/utils/proxy.py b/docker/utils/proxy.py
new file mode 100644
index 0000000..49e98ed
--- /dev/null
+++ b/docker/utils/proxy.py
@@ -0,0 +1,73 @@
+from .utils import format_environment
+
+
+class ProxyConfig(dict):
+ '''
+ Hold the client's proxy configuration
+ '''
+ @property
+ def http(self):
+ return self.get('http')
+
+ @property
+ def https(self):
+ return self.get('https')
+
+ @property
+ def ftp(self):
+ return self.get('ftp')
+
+ @property
+ def no_proxy(self):
+ return self.get('no_proxy')
+
+ @staticmethod
+ def from_dict(config):
+ '''
+ Instantiate a new ProxyConfig from a dictionary that represents a
+ client configuration, as described in `the documentation`_.
+
+ .. _the documentation:
+ https://docs.docker.com/network/proxy/#configure-the-docker-client
+ '''
+ return ProxyConfig(
+ http=config.get('httpProxy'),
+ https=config.get('httpsProxy'),
+ ftp=config.get('ftpProxy'),
+ no_proxy=config.get('noProxy'),
+ )
+
+ def get_environment(self):
+ '''
+ Return a dictionary representing the environment variables used to
+ set the proxy settings.
+ '''
+ env = {}
+ if self.http:
+ env['http_proxy'] = env['HTTP_PROXY'] = self.http
+ if self.https:
+ env['https_proxy'] = env['HTTPS_PROXY'] = self.https
+ if self.ftp:
+ env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
+ if self.no_proxy:
+ env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
+ return env
+
+ def inject_proxy_environment(self, environment):
+ '''
+ Given a list of strings representing environment variables, prepend the
+ environment variables corresponding to the proxy settings.
+ '''
+ if not self:
+ return environment
+
+ proxy_env = format_environment(self.get_environment())
+ if not environment:
+ return proxy_env
+ # It is important to prepend our variables, because we want the
+ # variables defined in "environment" to take precedence.
+ return proxy_env + environment
+
+ def __str__(self):
+ return 'ProxyConfig(http={}, https={}, ftp={}, no_proxy={})'.format(
+ self.http, self.https, self.ftp, self.no_proxy)
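A usage sketch for the new helper, with placeholder proxy values:

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig.from_dict({
        'httpProxy': 'http://proxy:3128',
        'noProxy': 'localhost,127.0.0.1',
    })
    cfg.get_environment()
    # {'http_proxy': 'http://proxy:3128', 'HTTP_PROXY': 'http://proxy:3128',
    #  'no_proxy': 'localhost,127.0.0.1', 'NO_PROXY': 'localhost,127.0.0.1'}
    cfg.inject_proxy_environment(['FOO=bar'])
    # Proxy variables come first, so entries in the passed-in environment
    # take precedence over them.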
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 7b96d4f..7ba9505 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -12,6 +12,10 @@ except ImportError:
NpipeSocket = type(None)
+STDOUT = 1
+STDERR = 2
+
+
class SocketError(Exception):
pass
@@ -51,28 +55,43 @@ def read_exactly(socket, n):
return data
-def next_frame_size(socket):
+def next_frame_header(socket):
"""
- Returns the size of the next frame of data waiting to be read from socket,
- according to the protocol defined here:
+ Returns the stream and size of the next frame of data waiting to be read
+ from socket, according to the protocol defined here:
- https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
+ https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
- return -1
+ return (-1, -1)
+
+ stream, actual = struct.unpack('>BxxxL', data)
+ return (stream, actual)
+
- _, actual = struct.unpack('>BxxxL', data)
- return actual
+def frames_iter(socket, tty):
+ """
+ Return a generator of frames read from socket. A frame is a tuple where
+ the first item is the stream number and the second item is a chunk of data.
+
+ If the tty setting is enabled, the streams are multiplexed into the stdout
+ stream.
+ """
+ if tty:
+ return ((STDOUT, frame) for frame in frames_iter_tty(socket))
+ else:
+ return frames_iter_no_tty(socket)
-def frames_iter(socket):
+def frames_iter_no_tty(socket):
"""
- Returns a generator of frames read from socket
+ Returns a generator of data read from the socket when the tty setting is
+ not enabled.
"""
while True:
- n = next_frame_size(socket)
+ (stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
@@ -84,13 +103,13 @@ def frames_iter(socket):
# We have reached EOF
return
n -= data_length
- yield result
+ yield (stream, result)
-def socket_raw_iter(socket):
+def frames_iter_tty(socket):
"""
- Returns a generator of data read from the socket.
- This is used for non-multiplexed streams.
+ Return a generator of data read from the socket when the tty setting is
+ enabled.
"""
while True:
result = read(socket)
@@ -98,3 +117,53 @@ def socket_raw_iter(socket):
# We have reached EOF
return
yield result
+
+
+def consume_socket_output(frames, demux=False):
+ """
+ Iterate through frames read from the socket and return the result.
+
+ Args:
+
+ demux (bool):
+ If False, stdout and stderr are multiplexed, and the result is the
+ concatenation of all the frames. If True, the streams are
+ demultiplexed, and the result is a 2-tuple where each item is the
+ concatenation of frames belonging to the same stream.
+ """
+ if demux is False:
+        # If the streams are multiplexed, the generator returns strings that
+ # we just need to concatenate.
+ return six.binary_type().join(frames)
+
+ # If the streams are demultiplexed, the generator yields tuples
+ # (stdout, stderr)
+ out = [None, None]
+ for frame in frames:
+ # It is guaranteed that for each frame, one and only one stream
+ # is not None.
+ assert frame != (None, None)
+ if frame[0] is not None:
+ if out[0] is None:
+ out[0] = frame[0]
+ else:
+ out[0] += frame[0]
+ else:
+ if out[1] is None:
+ out[1] = frame[1]
+ else:
+ out[1] += frame[1]
+ return tuple(out)
+
+
+def demux_adaptor(stream_id, data):
+ """
+ Utility to demultiplex stdout and stderr when reading frames from the
+ socket.
+ """
+ if stream_id == STDOUT:
+ return (data, None)
+ elif stream_id == STDERR:
+ return (None, data)
+ else:
+ raise ValueError('{0} is not a valid stream'.format(stream_id))
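As a worked example of the header format consumed by next_frame_header():
eight bytes, one stream id, three bytes of padding, then a big-endian 32-bit
payload length:

    import struct

    from docker.utils.socket import STDERR, demux_adaptor

    header = b'\x02\x00\x00\x00\x00\x00\x00\x0b'  # stderr, 11 payload bytes
    stream, size = struct.unpack('>BxxxL', header)
    assert (stream, size) == (STDERR, 11)
    # demux_adaptor() then routes data into the (stdout, stderr) pair:
    assert demux_adaptor(stream, b'hello world') == (None, b'hello world')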
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index fe3b9a5..7819ace 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,10 +1,11 @@
import base64
+import json
import os
import os.path
-import json
import shlex
-from distutils.version import StrictVersion
+import string
from datetime import datetime
+from distutils.version import StrictVersion
import six
@@ -13,11 +14,12 @@ from .. import tls
if six.PY2:
from urllib import splitnport
+ from urlparse import urlparse
else:
- from urllib.parse import splitnport
+ from urllib.parse import splitnport, urlparse
DEFAULT_HTTP_HOST = "127.0.0.1"
-DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
@@ -212,75 +214,93 @@ def parse_repository_tag(repo_name):
return repo_name, None
-# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
-# fd:// protocol unsupported (for obvious reasons)
-# Added support for http and https
-# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
- proto = "http+unix"
- port = None
path = ''
+ port = None
+ host = None
+ # Sensible defaults
if not addr and is_win32:
- addr = DEFAULT_NPIPE
-
+ return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
- if addr.startswith('http://'):
- addr = addr.replace('http://', 'tcp://')
- if addr.startswith('http+unix://'):
- addr = addr.replace('http+unix://', 'unix://')
- if addr == 'tcp://':
+ parsed_url = urlparse(addr)
+ proto = parsed_url.scheme
+ if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
+ # https://bugs.python.org/issue754016
+ parsed_url = urlparse('//' + addr, 'tcp')
+ proto = 'tcp'
+
+ if proto == 'fd':
+ raise errors.DockerException('fd protocol is not implemented')
+
+ # These protos are valid aliases for our library but not for the
+ # official spec
+ if proto == 'http' or proto == 'https':
+ tls = proto == 'https'
+ proto = 'tcp'
+ elif proto == 'http+unix':
+ proto = 'unix'
+
+ if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
- "Invalid bind address format: {0}".format(addr)
+ "Invalid bind address protocol: {}".format(addr)
)
- elif addr.startswith('unix://'):
- addr = addr[7:]
- elif addr.startswith('tcp://'):
- proto = 'http{0}'.format('s' if tls else '')
- addr = addr[6:]
- elif addr.startswith('https://'):
- proto = "https"
- addr = addr[8:]
- elif addr.startswith('npipe://'):
- proto = 'npipe'
- addr = addr[8:]
- elif addr.startswith('fd://'):
- raise errors.DockerException("fd protocol is not implemented")
- else:
- if "://" in addr:
- raise errors.DockerException(
- "Invalid bind address protocol: {0}".format(addr)
- )
- proto = "https" if tls else "http"
- if proto in ("http", "https"):
- address_parts = addr.split('/', 1)
- host = address_parts[0]
- if len(address_parts) == 2:
- path = '/' + address_parts[1]
- host, port = splitnport(host)
+ if proto == 'tcp' and not parsed_url.netloc:
+ # "tcp://" is exceptionally disallowed by convention;
+ # omitting a hostname for other protocols is fine
+ raise errors.DockerException(
+ 'Invalid bind address format: {}'.format(addr)
+ )
- if port is None:
- raise errors.DockerException(
- "Invalid port: {0}".format(addr)
- )
+ if any([
+ parsed_url.params, parsed_url.query, parsed_url.fragment,
+ parsed_url.password
+ ]):
+ raise errors.DockerException(
+ 'Invalid bind address format: {}'.format(addr)
+ )
+
+ if parsed_url.path and proto == 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: no path allowed for this protocol:'
+ ' {}'.format(addr)
+ )
+ else:
+ path = parsed_url.path
+ if proto == 'unix' and parsed_url.hostname is not None:
+ # For legacy reasons, we consider unix://path
+ # to be valid and equivalent to unix:///path
+ path = '/'.join((parsed_url.hostname, path))
+
+ if proto in ('tcp', 'ssh'):
+ # parsed_url.hostname strips brackets from IPv6 addresses,
+ # which can be problematic hence our use of splitnport() instead.
+ host, port = splitnport(parsed_url.netloc)
+ if port is None or port < 0:
+ if proto != 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: port is required:'
+ ' {}'.format(addr)
+ )
+ port = 22
if not host:
host = DEFAULT_HTTP_HOST
- else:
- host = addr
- if proto in ("http", "https") and port == -1:
- raise errors.DockerException(
- "Bind address needs a port: {0}".format(addr))
+ # Rewrite schemes to fit library internals (requests adapters)
+ if proto == 'tcp':
+ proto = 'http{}'.format('s' if tls else '')
+ elif proto == 'unix':
+ proto = 'http+unix'
- if proto == "http+unix" or proto == 'npipe':
- return "{0}://{1}".format(proto, host).rstrip('/')
- return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
+ if proto in ('http+unix', 'npipe'):
+ return "{}://{}".format(proto, path).rstrip('/')
+ return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
def parse_devices(devices):
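Representative translations under the rewritten parser, per the code above:

    from docker.utils.utils import parse_host

    parse_host('tcp://127.0.0.1:2375')            # 'http://127.0.0.1:2375'
    parse_host('tcp://127.0.0.1:2376', tls=True)  # 'https://127.0.0.1:2376'
    parse_host('unix:///var/run/docker.sock')     # 'http+unix:///var/run/docker.sock'
    parse_host('ssh://user@example-host')         # 'ssh://user@example-host:22'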
@@ -332,9 +352,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
params = {}
if host:
- params['base_url'] = (
- host.replace('tcp://', 'https://') if enable_tls else host
- )
+ params['base_url'] = host
if not enable_tls:
return params
@@ -366,7 +384,10 @@ def convert_filters(filters):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
- result[k] = v
+ result[k] = [
+ str(item) if not isinstance(item, six.string_types) else item
+ for item in v
+ ]
return json.dumps(result)
@@ -421,7 +442,7 @@ def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
- return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
+ return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
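Two smaller behavior changes in the same module, illustrated with the outputs
the code above produces:

    from docker.utils.utils import convert_filters, normalize_links

    convert_filters({'dangling': True})  # '{"dangling": ["true"]}'
    convert_filters({'exit': 0})         # '{"exit": ["0"]}' (ints coerced)
    normalize_links({'db': 'alias', 'cache': None})
    # ['cache', 'db:alias']; links without an alias render as bare names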
diff --git a/docker/version.py b/docker/version.py
index d451374..99a8b42 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "3.4.1"
+version = "4.1.0"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/requirements.txt b/requirements.txt
index 6c5e7d0..804a78a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,17 +2,18 @@ appdirs==1.4.3
asn1crypto==0.22.0
backports.ssl-match-hostname==3.5.0.1
cffi==1.10.0
-cryptography==1.9
-docker-pycreds==0.3.0
+cryptography==2.3
enum34==1.1.6
idna==2.5
ipaddress==1.0.18
packaging==16.8
+paramiko==2.4.2
pycparser==2.17
-pyOpenSSL==17.0.0
+pyOpenSSL==18.0.0
pyparsing==2.2.0
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
-pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
-requests==2.14.2
+pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
+requests==2.20.0
six==1.10.0
-websocket-client==0.40.0
+urllib3==1.24.3
+websocket-client==0.56.0
diff --git a/setup.py b/setup.py
index 57b2b5a..c29787b 100644
--- a/setup.py
+++ b/setup.py
@@ -4,16 +4,16 @@ from __future__ import print_function
import codecs
import os
-from setuptools import setup, find_packages
+from setuptools import find_packages
+from setuptools import setup
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.14.2, != 2.18.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
- 'docker-pycreds >= 0.3.0'
+ 'requests >= 2.14.2, != 2.18.0',
]
extras_require = {
@@ -27,7 +27,7 @@ extras_require = {
# Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
# on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
- ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==220',
+ ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
@@ -37,7 +37,11 @@ extras_require = {
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
- 'tls': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+
+ # Only required when connecting using the ssh:// protocol
+ 'ssh': ['paramiko>=2.4.2'],
+
}
version = None
@@ -48,24 +52,27 @@ with open('./test-requirements.txt') as test_reqs_txt:
long_description = ''
-try:
- with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
- long_description = readme_rst.read()
-except IOError:
- # README.rst is only generated on release. Its absence should not prevent
- # setup.py from working properly.
- pass
+with codecs.open('./README.md', encoding='utf-8') as readme_md:
+ long_description = readme_md.read()
setup(
name="docker",
version=version,
description="A Python library for the Docker Engine API.",
long_description=long_description,
+ long_description_content_type='text/markdown',
url='https://github.com/docker/docker-py',
+ project_urls={
+ 'Documentation': 'https://docker-py.readthedocs.io',
+ 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501
+ 'Source': 'https://github.com/docker/docker-py',
+ 'Tracker': 'https://github.com/docker/docker-py/issues',
+ },
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
zip_safe=False,
test_suite='tests',
classifiers=[
@@ -77,10 +84,10 @@ setup(
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Topic :: Software Development',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
diff --git a/test-requirements.txt b/test-requirements.txt
index 09680b6..0b01e56 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,6 @@
-coverage==3.7.1
-flake8==3.4.1
+coverage==4.5.2
+flake8==3.6.0
mock==1.0.1
-pytest==2.9.1
-pytest-cov==2.1.0
-pytest-timeout==1.2.1
+pytest==4.3.1
+pytest-cov==2.6.1
+pytest-timeout==1.3.3
diff --git a/tests/gpg-keys/ownertrust b/tests/gpg-keys/ownertrust
new file mode 100644
index 0000000..141ea57
--- /dev/null
+++ b/tests/gpg-keys/ownertrust
@@ -0,0 +1,3 @@
+# List of assigned trustvalues, created Wed 25 Apr 2018 01:28:17 PM PDT
+# (Use "gpg --import-ownertrust" to restore them)
+9781B87DAB042E6FD51388A5464ED987A7B21401:6:
diff --git a/tests/gpg-keys/secret b/tests/gpg-keys/secret
new file mode 100644
index 0000000..412294d
--- /dev/null
+++ b/tests/gpg-keys/secret
Binary files differ
diff --git a/tests/helpers.py b/tests/helpers.py
index b36d6d7..f344e1c 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -2,15 +2,16 @@ import functools
import os
import os.path
import random
+import re
+import socket
import tarfile
import tempfile
import time
-import re
-import six
-import socket
import docker
+import paramiko
import pytest
+import six
def make_tree(dirs, files):
@@ -118,10 +119,18 @@ def assert_cat_socket_detached_with_keys(sock, inputs):
# If we're using a Unix socket, the sock.send call will fail with a
# BrokenPipeError ; INET sockets will just stop receiving / sending data
# but will not raise an error
- if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
- with pytest.raises(socket.error):
+ if isinstance(sock, paramiko.Channel):
+ with pytest.raises(OSError):
sock.sendall(b'make sure the socket is closed\n')
else:
+ if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
+ # We do not want to use pytest.raises here because future versions
+ # of the daemon no longer cause this to raise an error.
+ try:
+ sock.sendall(b'make sure the socket is closed\n')
+ except socket.error:
+ return
+
sock.sendall(b"make sure the socket is closed\n")
data = sock.recv(128)
# New in 18.06: error message is broadcast over the socket when reading
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
index baaf33e..5712812 100644
--- a/tests/integration/api_build_test.py
+++ b/tests/integration/api_build_test.py
@@ -4,15 +4,58 @@ import shutil
import tempfile
from docker import errors
+from docker.utils.proxy import ProxyConfig
import pytest
import six
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
from ..helpers import random_name, requires_api_version, requires_experimental
class BuildTest(BaseAPIIntegrationTest):
+ def test_build_with_proxy(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=a"',
+ 'RUN env | grep "ftp_proxy=a"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(fileobj=script, decode=True)
+
+ def test_build_with_proxy_and_buildargs(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=XXX"',
+ 'RUN env | grep "ftp_proxy=xxx"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(
+ fileobj=script,
+ decode=True,
+ buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'}
+ )
+
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
@@ -234,7 +277,7 @@ class BuildTest(BaseAPIIntegrationTest):
# Set up pingable endpoint on custom network
network = self.client.create_network(random_name())['Id']
self.tmp_networks.append(network)
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
self.client.connect_container_to_network(
@@ -405,8 +448,10 @@ class BuildTest(BaseAPIIntegrationTest):
for _ in stream:
pass
- assert excinfo.value.status_code == 400
- assert 'invalid platform' in excinfo.exconly()
+        # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
def test_build_out_of_context_dockerfile(self):
base_dir = tempfile.mkdtemp()
@@ -540,6 +585,11 @@ class BuildTest(BaseAPIIntegrationTest):
) == sorted(lsdata)
@requires_api_version('1.31')
+ @pytest.mark.xfail(
+ True,
+ reason='Currently fails on 18.09: '
+ 'https://github.com/moby/moby/issues/37920'
+ )
def test_prune_builds(self):
prune_result = self.client.prune_builds()
assert 'SpaceReclaimed' in prune_result
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
index 905e064..9e348f3 100644
--- a/tests/integration/api_client_test.py
+++ b/tests/integration/api_client_test.py
@@ -47,7 +47,7 @@ class ConnectionTimeoutTest(unittest.TestCase):
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
- except:
+ except: # noqa: E722
pass
end = time.time()
assert res is None
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
index ff70148..1ba3eaa 100644
--- a/tests/integration/api_container_test.py
+++ b/tests/integration/api_container_test.py
@@ -5,28 +5,27 @@ import tempfile
import threading
from datetime import datetime
-import docker
-from docker.constants import IS_WINDOWS_PLATFORM
-from docker.utils.socket import next_frame_size
-from docker.utils.socket import read_exactly
-
import pytest
-
import requests
import six
-from .base import BUSYBOX, BaseAPIIntegrationTest
+import docker
from .. import helpers
-from ..helpers import (
- requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
-)
+from ..helpers import assert_cat_socket_detached_with_keys
+from ..helpers import ctrl_with
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
+from .base import TEST_IMG
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.utils.socket import next_frame_header
+from docker.utils.socket import read_exactly
class ListContainersTest(BaseAPIIntegrationTest):
def test_list_containers(self):
res0 = self.client.containers(all=True)
size = len(res0)
- res1 = self.client.create_container(BUSYBOX, 'true')
+ res1 = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res1
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
@@ -38,20 +37,20 @@ class ListContainersTest(BaseAPIIntegrationTest):
assert 'Command' in retrieved
assert retrieved['Command'] == six.text_type('true')
assert 'Image' in retrieved
- assert re.search(r'busybox:.*', retrieved['Image'])
+ assert re.search(r'alpine:.*', retrieved['Image'])
assert 'Status' in retrieved
class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
def test_create_with_host_pid_mode(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true', host_config=self.client.create_host_config(
+ TEST_IMG, 'true', host_config=self.client.create_host_config(
pid_mode='host', network_mode='none'
)
)
@@ -66,7 +65,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_links(self):
res0 = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
@@ -76,7 +75,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container1_id)
res1 = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
@@ -95,7 +94,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
link_env_prefix2 = link_alias2.upper()
res2 = self.client.create_container(
- BUSYBOX, 'env', host_config=self.client.create_host_config(
+ TEST_IMG, 'env', host_config=self.client.create_host_config(
links={link_path1: link_alias1, link_path2: link_alias2},
network_mode='bridge'
)
@@ -115,7 +114,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_restart_policy(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '2'],
+ TEST_IMG, ['sleep', '2'],
host_config=self.client.create_host_config(
restart_policy={"Name": "always", "MaximumRetryCount": 0},
network_mode='none'
@@ -134,21 +133,21 @@ class CreateContainerTest(BaseAPIIntegrationTest):
vol_names = ['foobar_vol0', 'foobar_vol1']
res0 = self.client.create_container(
- BUSYBOX, 'true', name=vol_names[0]
+ TEST_IMG, 'true', name=vol_names[0]
)
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
- BUSYBOX, 'true', name=vol_names[1]
+ TEST_IMG, 'true', name=vol_names[1]
)
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
res = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True,
+ TEST_IMG, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
@@ -162,7 +161,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def create_container_readonly_fs(self):
ctnr = self.client.create_container(
- BUSYBOX, ['mkdir', '/shrine'],
+ TEST_IMG, ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
read_only=True, network_mode='none'
)
@@ -174,7 +173,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
assert res != 0
def create_container_with_name(self):
- res = self.client.create_container(BUSYBOX, 'true', name='foobar')
+ res = self.client.create_container(TEST_IMG, 'true', name='foobar')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
@@ -183,7 +182,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def create_container_privileged(self):
res = self.client.create_container(
- BUSYBOX, 'true', host_config=self.client.create_host_config(
+ TEST_IMG, 'true', host_config=self.client.create_host_config(
privileged=True, network_mode='none'
)
)
@@ -209,7 +208,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected)
+ TEST_IMG, ['sleep', '60'], mac_address=mac_address_expected)
id = container['Id']
@@ -221,7 +220,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_group_id_ints(self):
container = self.client.create_container(
- BUSYBOX, 'id -G',
+ TEST_IMG, 'id -G',
host_config=self.client.create_host_config(group_add=[1000, 1001])
)
self.tmp_containers.append(container)
@@ -237,7 +236,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_group_id_strings(self):
container = self.client.create_container(
- BUSYBOX, 'id -G', host_config=self.client.create_host_config(
+ TEST_IMG, 'id -G', host_config=self.client.create_host_config(
group_add=['1000', '1001']
)
)
@@ -260,7 +259,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
container = self.client.create_container(
- BUSYBOX, ['true'],
+ TEST_IMG, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
@@ -282,7 +281,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
with pytest.raises(docker.errors.APIError) as excinfo:
# raises an internal server error 500
container = self.client.create_container(
- BUSYBOX, ['true'], host_config=self.client.create_host_config(
+ TEST_IMG, ['true'], host_config=self.client.create_host_config(
log_config=log_config
)
)
@@ -297,7 +296,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
container = self.client.create_container(
- BUSYBOX, ['true'],
+ TEST_IMG, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
@@ -316,7 +315,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
container = self.client.create_container(
- BUSYBOX, ['true'],
+ TEST_IMG, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
@@ -330,7 +329,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_memory_constraints_with_str(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true',
+ TEST_IMG, 'true',
host_config=self.client.create_host_config(
memswap_limit='1G',
mem_limit='700M'
@@ -348,7 +347,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_memory_constraints_with_int(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true',
+ TEST_IMG, 'true',
host_config=self.client.create_host_config(mem_swappiness=40)
)
assert 'Id' in ctnr
@@ -362,16 +361,15 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
- BUSYBOX,
+ TEST_IMG,
['echo'],
environment={'Foo': None, 'Other': 'one', 'Blank': ''},
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container['Id'])
- assert (
- sorted(config['Config']['Env']) ==
- sorted(['Foo', 'Other=one', 'Blank='])
- )
+ assert 'Foo' in config['Config']['Env']
+ assert 'Other=one' in config['Config']['Env']
+ assert 'Blank=' in config['Config']['Env']
@requires_api_version('1.22')
def test_create_with_tmpfs(self):
@@ -380,7 +378,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
}
container = self.client.create_container(
- BUSYBOX,
+ TEST_IMG,
['echo'],
host_config=self.client.create_host_config(
tmpfs=tmpfs))
@@ -392,7 +390,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
@requires_api_version('1.24')
def test_create_with_isolation(self):
container = self.client.create_container(
- BUSYBOX, ['echo'], host_config=self.client.create_host_config(
+ TEST_IMG, ['echo'], host_config=self.client.create_host_config(
isolation='default'
)
)
@@ -406,7 +404,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
auto_remove=True
)
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], host_config=host_config
+ TEST_IMG, ['echo', 'test'], host_config=host_config
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
@@ -415,7 +413,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_with_stop_timeout(self):
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], stop_timeout=25
+ TEST_IMG, ['echo', 'test'], stop_timeout=25
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
@@ -428,7 +426,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
storage_opt={'size': '120G'}
)
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], host_config=host_config
+ TEST_IMG, ['echo', 'test'], host_config=host_config
)
self.tmp_containers.append(container)
config = self.client.inspect_container(container)
@@ -439,7 +437,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_with_init(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true',
+ TEST_IMG, 'true',
host_config=self.client.create_host_config(
init=True
)
@@ -448,25 +446,12 @@ class CreateContainerTest(BaseAPIIntegrationTest):
config = self.client.inspect_container(ctnr)
assert config['HostConfig']['Init'] is True
- @pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
- @requires_api_version('1.25')
- def test_create_with_init_path(self):
- ctnr = self.client.create_container(
- BUSYBOX, 'true',
- host_config=self.client.create_host_config(
- init_path="/usr/libexec/docker-init"
- )
- )
- self.tmp_containers.append(ctnr['Id'])
- config = self.client.inspect_container(ctnr)
- assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
-
@requires_api_version('1.24')
@pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu.rt_runtime_us'),
reason='CONFIG_RT_GROUP_SCHED isn\'t enabled')
def test_create_with_cpu_rt_options(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true', host_config=self.client.create_host_config(
+ TEST_IMG, 'true', host_config=self.client.create_host_config(
cpu_rt_period=1000, cpu_rt_runtime=500
)
)
@@ -479,7 +464,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_device_cgroup_rules(self):
rule = 'c 7:128 rwm'
ctnr = self.client.create_container(
- BUSYBOX, 'cat /sys/fs/cgroup/devices/devices.list',
+ TEST_IMG, 'cat /sys/fs/cgroup/devices/devices.list',
host_config=self.client.create_host_config(
device_cgroup_rules=[rule]
)
@@ -490,6 +475,16 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(ctnr)
assert rule in self.client.logs(ctnr).decode('utf-8')
+ def test_create_with_uts_mode(self):
+ container = self.client.create_container(
+ TEST_IMG, ['echo'], host_config=self.client.create_host_config(
+ uts_mode='host'
+ )
+ )
+ self.tmp_containers.append(container)
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['UTSMode'] == 'host'
+
@pytest.mark.xfail(
IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
@@ -506,7 +501,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
self.run_with_volume(
False,
- BUSYBOX,
+ TEST_IMG,
['touch', os.path.join(self.mount_dest, self.filename)],
)
@@ -514,7 +509,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
container = self.run_with_volume(
False,
- BUSYBOX,
+ TEST_IMG,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
@@ -528,12 +523,12 @@ class VolumeBindTest(BaseAPIIntegrationTest):
def test_create_with_binds_ro(self):
self.run_with_volume(
False,
- BUSYBOX,
+ TEST_IMG,
['touch', os.path.join(self.mount_dest, self.filename)],
)
container = self.run_with_volume(
True,
- BUSYBOX,
+ TEST_IMG,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
@@ -552,7 +547,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
host_config = self.client.create_host_config(mounts=[mount])
container = self.run_container(
- BUSYBOX, ['ls', self.mount_dest],
+ TEST_IMG, ['ls', self.mount_dest],
host_config=host_config
)
assert container
@@ -571,7 +566,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
host_config = self.client.create_host_config(mounts=[mount])
container = self.run_container(
- BUSYBOX, ['ls', self.mount_dest],
+ TEST_IMG, ['ls', self.mount_dest],
host_config=host_config
)
assert container
@@ -590,7 +585,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
host_config = self.client.create_host_config(mounts=[mount])
container = self.client.create_container(
- BUSYBOX, ['true'], host_config=host_config,
+ TEST_IMG, ['true'], host_config=host_config,
)
assert container
inspect_data = self.client.inspect_container(container)
@@ -636,7 +631,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
- BUSYBOX, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
+ TEST_IMG, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@@ -655,7 +650,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
- BUSYBOX, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
+ TEST_IMG, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@@ -673,7 +668,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
test_file.write(data)
test_file.seek(0)
ctnr = self.client.create_container(
- BUSYBOX,
+ TEST_IMG,
'cat {0}'.format(
os.path.join('/vol1/', os.path.basename(test_file.name))
),
@@ -695,7 +690,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
dirs = ['foo', 'bar']
base = helpers.make_tree(dirs, files)
ctnr = self.client.create_container(
- BUSYBOX, 'ls -p /vol1', volumes=['/vol1']
+ TEST_IMG, 'ls -p /vol1', volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
with docker.utils.tar(base) as test_tar:
@@ -716,7 +711,7 @@ class RenameContainerTest(BaseAPIIntegrationTest):
def test_rename_container(self):
version = self.client.version()['Version']
name = 'hong_meiling'
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
@@ -730,7 +725,7 @@ class RenameContainerTest(BaseAPIIntegrationTest):
class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
@@ -746,7 +741,7 @@ class StartContainerTest(BaseAPIIntegrationTest):
assert inspect['State']['ExitCode'] == 0
def test_start_container_with_dict_instead_of_id(self):
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res)
@@ -774,7 +769,7 @@ class StartContainerTest(BaseAPIIntegrationTest):
'true && echo "Night of Nights"'
]
for cmd in commands:
- container = self.client.create_container(BUSYBOX, cmd)
+ container = self.client.create_container(TEST_IMG, cmd)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -784,7 +779,7 @@ class StartContainerTest(BaseAPIIntegrationTest):
class WaitTest(BaseAPIIntegrationTest):
def test_wait(self):
- res = self.client.create_container(BUSYBOX, ['sleep', '3'])
+ res = self.client.create_container(TEST_IMG, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
@@ -797,7 +792,7 @@ class WaitTest(BaseAPIIntegrationTest):
assert inspect['State']['ExitCode'] == exitcode
def test_wait_with_dict_instead_of_id(self):
- res = self.client.create_container(BUSYBOX, ['sleep', '3'])
+ res = self.client.create_container(TEST_IMG, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
@@ -811,13 +806,13 @@ class WaitTest(BaseAPIIntegrationTest):
@requires_api_version('1.30')
def test_wait_with_condition(self):
- ctnr = self.client.create_container(BUSYBOX, 'true')
+ ctnr = self.client.create_container(TEST_IMG, 'true')
self.tmp_containers.append(ctnr)
with pytest.raises(requests.exceptions.ConnectionError):
self.client.wait(ctnr, condition='removed', timeout=1)
ctnr = self.client.create_container(
- BUSYBOX, ['sleep', '3'],
+ TEST_IMG, ['sleep', '3'],
host_config=self.client.create_host_config(auto_remove=True)
)
self.tmp_containers.append(ctnr)
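
The hunk above exercises the `condition` parameter of `wait` (API >= 1.30): waiting for the 'removed' condition on an ordinary container times out, while an auto-removed container satisfies it once the daemon deletes it. A minimal sketch of the same pattern, assuming a reachable daemon and the alpine:3.10 test image:

    import docker

    client = docker.APIClient()
    ctnr = client.create_container(
        'alpine:3.10', ['sleep', '3'],
        host_config=client.create_host_config(auto_remove=True),
    )
    client.start(ctnr)
    # Blocks until the container is deleted, not merely stopped.
    result = client.wait(ctnr, condition='removed', timeout=10)
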
@@ -831,7 +826,7 @@ class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo {0}'.format(snippet)
+ TEST_IMG, 'echo {0}'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
@@ -845,7 +840,7 @@ class LogsTest(BaseAPIIntegrationTest):
snippet = '''Line1
Line2'''
container = self.client.create_container(
- BUSYBOX, 'echo "{0}"'.format(snippet)
+ TEST_IMG, 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
@@ -858,7 +853,7 @@ Line2'''
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo {0}'.format(snippet)
+ TEST_IMG, 'echo {0}'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
@@ -873,10 +868,12 @@ Line2'''
assert logs == (snippet + '\n').encode(encoding='ascii')
@pytest.mark.timeout(5)
+ @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+ reason='No cancellable streams over SSH')
def test_logs_streaming_and_follow_and_cancel(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
+ TEST_IMG, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
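
The new skipif reflects a transport limitation: a followed log stream is cancelled by closing the response generator, and the SSH transport offers no way to do that mid-stream. A sketch of the cancellation pattern the test relies on, assuming a local daemon:

    import threading

    import docker

    client = docker.APIClient()
    ctnr = client.create_container('alpine:3.10', 'sh -c "echo hi && sleep 60"')
    client.start(ctnr)
    stream = client.logs(ctnr, stream=True, follow=True)
    # Closing the generator tears down the underlying HTTP connection,
    # so iteration terminates instead of following forever.
    threading.Timer(3, stream.close).start()
    lines = list(stream)
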
@@ -894,7 +891,7 @@ Line2'''
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo {0}'.format(snippet)
+ TEST_IMG, 'echo {0}'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
@@ -907,7 +904,7 @@ Line2'''
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo "{0}"'.format(snippet)
+ TEST_IMG, 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
@@ -921,7 +918,7 @@ Line2'''
def test_logs_with_until(self):
snippet = 'Shanghai Teahouse (Hong Meiling)'
container = self.client.create_container(
- BUSYBOX, 'echo "{0}"'.format(snippet)
+ TEST_IMG, 'echo "{0}"'.format(snippet)
)
self.tmp_containers.append(container)
@@ -936,7 +933,7 @@ Line2'''
class DiffTest(BaseAPIIntegrationTest):
def test_diff(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -949,7 +946,7 @@ class DiffTest(BaseAPIIntegrationTest):
assert test_diff[0]['Kind'] == 1
def test_diff_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -964,7 +961,7 @@ class DiffTest(BaseAPIIntegrationTest):
class StopTest(BaseAPIIntegrationTest):
def test_stop(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -976,7 +973,7 @@ class StopTest(BaseAPIIntegrationTest):
assert state['Running'] is False
def test_stop_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
assert 'Id' in container
id = container['Id']
self.client.start(container)
@@ -991,7 +988,7 @@ class StopTest(BaseAPIIntegrationTest):
class KillTest(BaseAPIIntegrationTest):
def test_kill(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -1005,7 +1002,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False
def test_kill_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -1019,7 +1016,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False
def test_kill_with_signal(self):
- id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.tmp_containers.append(id)
self.client.start(id)
self.client.kill(
@@ -1036,7 +1033,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False, state
def test_kill_with_signal_name(self):
- id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal='SIGKILL')
@@ -1051,7 +1048,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False, state
def test_kill_with_signal_integer(self):
- id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=9)
@@ -1068,14 +1065,19 @@ class KillTest(BaseAPIIntegrationTest):
class PortTest(BaseAPIIntegrationTest):
def test_port(self):
-
port_bindings = {
'1111': ('127.0.0.1', '4567'),
- '2222': ('127.0.0.1', '4568')
+ '2222': ('127.0.0.1', '4568'),
+ '3333/udp': ('127.0.0.1', '4569'),
}
+ ports = [
+ 1111,
+ 2222,
+ (3333, 'udp'),
+ ]
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
+ TEST_IMG, ['sleep', '60'], ports=ports,
host_config=self.client.create_host_config(
port_bindings=port_bindings, network_mode='bridge'
)
@@ -1086,13 +1088,15 @@ class PortTest(BaseAPIIntegrationTest):
# Call the port function on each binding and compare expected vs actual
for port in port_bindings:
+ port, _, protocol = port.partition('/')
actual_bindings = self.client.port(container, port)
port_binding = actual_bindings.pop()
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
- assert ip == port_bindings[port][0]
- assert host_port == port_bindings[port][1]
+ port_binding = port if not protocol else port + "/" + protocol
+ assert ip == port_bindings[port_binding][0]
+ assert host_port == port_bindings[port_binding][1]
self.client.kill(id)
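
The reworked test adds a UDP binding: keys in `port_bindings` may carry a '/udp' suffix, exposed ports may be given as (port, protocol) tuples, and `port()` accepts the suffixed form when looking up a non-TCP binding. A condensed sketch under the same assumptions as the test:

    import docker

    client = docker.APIClient()
    bindings = {'1111': ('127.0.0.1', '4567'), '3333/udp': ('127.0.0.1', '4569')}
    ctnr = client.create_container(
        'alpine:3.10', ['sleep', '60'],
        ports=[1111, (3333, 'udp')],  # tuples carry the protocol
        host_config=client.create_host_config(port_bindings=bindings),
    )
    client.start(ctnr)
    client.port(ctnr, 1111)        # [{'HostIp': '127.0.0.1', 'HostPort': '4567'}]
    client.port(ctnr, '3333/udp')  # UDP lookups need the protocol suffix
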
@@ -1100,7 +1104,7 @@ class PortTest(BaseAPIIntegrationTest):
class ContainerTopTest(BaseAPIIntegrationTest):
def test_top(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60']
+ TEST_IMG, ['sleep', '60']
)
self.tmp_containers.append(container)
@@ -1120,7 +1124,7 @@ class ContainerTopTest(BaseAPIIntegrationTest):
)
def test_top_with_psargs(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'])
+ TEST_IMG, ['sleep', '60'])
self.tmp_containers.append(container)
@@ -1136,7 +1140,7 @@ class ContainerTopTest(BaseAPIIntegrationTest):
class RestartContainerTest(BaseAPIIntegrationTest):
def test_restart(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -1155,16 +1159,16 @@ class RestartContainerTest(BaseAPIIntegrationTest):
self.client.kill(id)
def test_restart_with_low_timeout(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.client.start(container)
- self.client.timeout = 1
- self.client.restart(container, timeout=3)
+ self.client.timeout = 3
+ self.client.restart(container, timeout=1)
self.client.timeout = None
- self.client.restart(container, timeout=3)
+ self.client.restart(container, timeout=1)
self.client.kill(container)
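
The swapped values fix the test's intent: `client.timeout` is the HTTP timeout for the API call itself, while the `timeout` argument to `restart` is how long the daemon waits before killing the container. The HTTP timeout must be the larger of the two, or the request can abort while the daemon is still stopping the container. A minimal sketch:

    import docker

    # Keep the client-side HTTP timeout above the daemon-side stop timeout.
    client = docker.APIClient(timeout=30)
    ctnr = client.create_container('alpine:3.10', ['sleep', '9999'])
    client.start(ctnr)
    client.restart(ctnr, timeout=10)  # daemon waits up to 10s before SIGKILL
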
def test_restart_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
assert 'Id' in container
id = container['Id']
self.client.start(container)
@@ -1186,7 +1190,7 @@ class RestartContainerTest(BaseAPIIntegrationTest):
class RemoveContainerTest(BaseAPIIntegrationTest):
def test_remove(self):
- container = self.client.create_container(BUSYBOX, ['true'])
+ container = self.client.create_container(TEST_IMG, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
@@ -1196,7 +1200,7 @@ class RemoveContainerTest(BaseAPIIntegrationTest):
assert len(res) == 0
def test_remove_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['true'])
+ container = self.client.create_container(TEST_IMG, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
@@ -1208,7 +1212,7 @@ class RemoveContainerTest(BaseAPIIntegrationTest):
class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
- container = self.client.create_container(BUSYBOX, '/bin/sh',
+ container = self.client.create_container(TEST_IMG, '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
@@ -1220,7 +1224,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
command = "printf '{0}'".format(line)
- container = self.client.create_container(BUSYBOX, command,
+ container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
@@ -1230,31 +1234,37 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
- next_size = next_frame_size(pty_stdout)
+ (stream, next_size) = next_frame_header(pty_stdout)
+ assert stream == 1 # corresponds to stdout
assert next_size == len(line)
data = read_exactly(pty_stdout, next_size)
assert data.decode('utf-8') == line
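
`next_frame_size` became `next_frame_header`, which also reports which stream a frame belongs to. The value comes from Docker's stream-multiplexing format: each frame is prefixed by an 8-byte header holding the stream id and the payload length. A hypothetical parser illustrating the layout (the real helper lives in docker/utils/socket.py):

    import struct

    def parse_frame_header(header):
        # Byte 0: stream id (0=stdin, 1=stdout, 2=stderr); bytes 1-3: padding;
        # bytes 4-7: payload length as a big-endian uint32.
        return struct.unpack('>BxxxL', header)

    parse_frame_header(b'\x01\x00\x00\x00\x00\x00\x00\x05')  # -> (1, 5)
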
def test_attach_no_stream(self):
container = self.client.create_container(
- BUSYBOX, 'echo hello'
+ TEST_IMG, 'echo hello'
)
self.tmp_containers.append(container)
self.client.start(container)
output = self.client.attach(container, stream=False, logs=True)
assert output == 'hello\n'.encode(encoding='ascii')
- @pytest.mark.timeout(5)
+ @pytest.mark.timeout(10)
+ @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+ reason='No cancellable streams over SSH')
+ @pytest.mark.xfail(condition=os.environ.get('DOCKER_TLS_VERIFY') or
+ os.environ.get('DOCKER_CERT_PATH'),
+ reason='Flaky test on TLS')
def test_attach_stream_and_cancel(self):
container = self.client.create_container(
- BUSYBOX, 'sh -c "echo hello && sleep 60"',
+ TEST_IMG, 'sh -c "sleep 2 && echo hello && sleep 60"',
tty=True
)
self.tmp_containers.append(container)
self.client.start(container)
output = self.client.attach(container, stream=True, logs=True)
- threading.Timer(1, output.close).start()
+ threading.Timer(3, output.close).start()
lines = []
for line in output:
@@ -1265,7 +1275,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
def test_detach_with_default(self):
container = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
@@ -1284,7 +1294,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
@@ -1301,7 +1311,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
@@ -1317,7 +1327,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.tmp_containers.append(id)
self.client.start(container)
@@ -1348,9 +1358,9 @@ class PruneTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_prune_containers(self):
container1 = self.client.create_container(
- BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
+ TEST_IMG, ['sh', '-c', 'echo hello > /data.txt']
)
- container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container2 = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.client.start(container1)
self.client.start(container2)
self.client.wait(container1)
@@ -1363,7 +1373,7 @@ class PruneTest(BaseAPIIntegrationTest):
class GetContainerStatsTest(BaseAPIIntegrationTest):
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'],
+ TEST_IMG, ['sleep', '60'],
)
self.tmp_containers.append(container)
self.client.start(container)
@@ -1377,7 +1387,7 @@ class GetContainerStatsTest(BaseAPIIntegrationTest):
def test_get_container_stats_stream(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'],
+ TEST_IMG, ['sleep', '60'],
)
self.tmp_containers.append(container)
self.client.start(container)
@@ -1395,7 +1405,7 @@ class ContainerUpdateTest(BaseAPIIntegrationTest):
old_mem_limit = 400 * 1024 * 1024
new_mem_limit = 300 * 1024 * 1024
container = self.client.create_container(
- BUSYBOX, 'top', host_config=self.client.create_host_config(
+ TEST_IMG, 'top', host_config=self.client.create_host_config(
mem_limit=old_mem_limit
)
)
@@ -1416,7 +1426,7 @@ class ContainerUpdateTest(BaseAPIIntegrationTest):
'Name': 'on-failure'
}
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'],
+ TEST_IMG, ['sleep', '60'],
host_config=self.client.create_host_config(
restart_policy=old_restart_policy
)
@@ -1440,7 +1450,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
def test_container_cpu_shares(self):
cpu_shares = 512
container = self.client.create_container(
- BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ TEST_IMG, 'ls', host_config=self.client.create_host_config(
cpu_shares=cpu_shares
)
)
@@ -1452,7 +1462,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
def test_container_cpuset(self):
cpuset_cpus = "0,1"
container = self.client.create_container(
- BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ TEST_IMG, 'ls', host_config=self.client.create_host_config(
cpuset_cpus=cpuset_cpus
)
)
@@ -1464,7 +1474,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_with_runtime(self):
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], runtime='runc'
+ TEST_IMG, ['echo', 'test'], runtime='runc'
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
@@ -1475,7 +1485,7 @@ class LinkTest(BaseAPIIntegrationTest):
def test_remove_link(self):
# Create containers
container1 = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
@@ -1487,7 +1497,7 @@ class LinkTest(BaseAPIIntegrationTest):
link_alias = 'mylink'
container2 = self.client.create_container(
- BUSYBOX, 'cat', host_config=self.client.create_host_config(
+ TEST_IMG, 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}
)
)
diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py
index 1a5a4e5..554e862 100644
--- a/tests/integration/api_exec_test.py
+++ b/tests/integration/api_exec_test.py
@@ -1,15 +1,54 @@
-from docker.utils.socket import next_frame_size
+from ..helpers import assert_cat_socket_detached_with_keys
+from ..helpers import ctrl_with
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
+from .base import TEST_IMG
+from docker.utils.proxy import ProxyConfig
+from docker.utils.socket import next_frame_header
from docker.utils.socket import read_exactly
-from .base import BaseAPIIntegrationTest, BUSYBOX
-from ..helpers import (
- requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
-)
-
class ExecTest(BaseAPIIntegrationTest):
+ def test_execute_command_with_proxy_env(self):
+ # Set a custom proxy config on the client
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', https='b', http='c', no_proxy='d'
+ )
+
+ container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True,
+ )
+ self.client.start(container)
+ self.tmp_containers.append(container)
+
+ cmd = 'sh -c "env | grep -i proxy"'
+
+ # First, just make sure the environment variables from the custom
+ # config are set
+
+ res = self.client.exec_create(container, cmd=cmd)
+ output = self.client.exec_start(res).decode('utf-8').split('\n')
+ expected = [
+ 'ftp_proxy=a', 'https_proxy=b', 'http_proxy=c', 'no_proxy=d',
+ 'FTP_PROXY=a', 'HTTPS_PROXY=b', 'HTTP_PROXY=c', 'NO_PROXY=d'
+ ]
+ for item in expected:
+ assert item in output
+
+ # Overwrite some variables with a custom environment
+ env = {'https_proxy': 'xxx', 'HTTPS_PROXY': 'XXX'}
+
+ res = self.client.exec_create(container, cmd=cmd, environment=env)
+ output = self.client.exec_start(res).decode('utf-8').split('\n')
+ expected = [
+ 'ftp_proxy=a', 'https_proxy=xxx', 'http_proxy=c', 'no_proxy=d',
+ 'FTP_PROXY=a', 'HTTPS_PROXY=XXX', 'HTTP_PROXY=c', 'NO_PROXY=d'
+ ]
+ for item in expected:
+ assert item in output
+
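
The new test seeds the client through the private `_proxy_configs` attribute; in normal use these values are read from the "proxies" section of ~/.docker/config.json and injected into every container and exec environment, with explicit `environment` entries taking precedence. A small sketch; the `get_environment` helper is assumed from docker/utils/proxy.py:

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig(http='proxy:3128', https='proxy:3129', no_proxy='localhost')
    # Expands to both lower- and upper-case variable forms, e.g.
    # ['http_proxy=proxy:3128', 'HTTP_PROXY=proxy:3128', ...]
    env = cfg.get_environment()
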
def test_execute_command(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -22,7 +61,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert exec_log == b'hello\n'
def test_exec_command_string(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -35,20 +74,20 @@ class ExecTest(BaseAPIIntegrationTest):
assert exec_log == b'hello world\n'
def test_exec_command_as_user(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- res = self.client.exec_create(id, 'whoami', user='default')
+ res = self.client.exec_create(id, 'whoami', user='postgres')
assert 'Id' in res
exec_log = self.client.exec_start(res)
- assert exec_log == b'default\n'
+ assert exec_log == b'postgres\n'
def test_exec_command_as_root(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -61,7 +100,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert exec_log == b'root\n'
def test_exec_command_streaming(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
@@ -76,7 +115,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert res == b'hello\nworld\n'
def test_exec_start_socket(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
@@ -91,13 +130,14 @@ class ExecTest(BaseAPIIntegrationTest):
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
- next_size = next_frame_size(socket)
+ (stream, next_size) = next_frame_header(socket)
+ assert stream == 1 # stdout (0 = stdin, 1 = stdout, 2 = stderr)
assert next_size == len(line)
data = read_exactly(socket, next_size)
assert data.decode('utf-8') == line
def test_exec_start_detached(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
@@ -112,7 +152,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert response == ""
def test_exec_inspect(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -127,7 +167,7 @@ class ExecTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_exec_command_with_env(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -142,18 +182,18 @@ class ExecTest(BaseAPIIntegrationTest):
@requires_api_version('1.35')
def test_exec_command_with_workdir(self):
container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
self.tmp_containers.append(container)
self.client.start(container)
- res = self.client.exec_create(container, 'pwd', workdir='/var/www')
+ res = self.client.exec_create(container, 'pwd', workdir='/var/opt')
exec_log = self.client.exec_start(res)
- assert exec_log == b'/var/www\n'
+ assert exec_log == b'/var/opt\n'
def test_detach_with_default(self):
container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
@@ -172,7 +212,7 @@ class ExecTest(BaseAPIIntegrationTest):
def test_detach_with_config_file(self):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
@@ -186,20 +226,87 @@ class ExecTest(BaseAPIIntegrationTest):
assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
- def test_detach_with_arg(self):
- self.client._general_configs['detachKeys'] = 'ctrl-p'
- container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
- )
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
- exec_id = self.client.exec_create(
- id, 'cat',
- stdin=True, tty=True, detach_keys='ctrl-x', stdout=True
+class ExecDemuxTest(BaseAPIIntegrationTest):
+ cmd = 'sh -c "{}"'.format(' ; '.join([
+ # Write something on stdout
+ 'echo hello out',
+ # Busybox's sleep does not handle sub-second times.
+ # This loop takes ~0.3 seconds to execute on my machine.
+ 'sleep 0.5',
+ # Write something on stderr
+ 'echo hello err >&2'])
+ )
+
+ def setUp(self):
+ super(ExecDemuxTest, self).setUp()
+ self.container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
- sock = self.client.exec_start(exec_id, tty=True, socket=True)
- self.addCleanup(sock.close)
+ self.client.start(self.container)
+ self.tmp_containers.append(self.container)
- assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
+ def test_exec_command_no_stream_no_demux(self):
+ # tty=False, stream=False, demux=False
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = self.client.exec_start(res)
+ assert b'hello out\n' in exec_log
+ assert b'hello err\n' in exec_log
+
+ def test_exec_command_stream_no_demux(self):
+ # tty=False, stream=True, demux=False
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = list(self.client.exec_start(res, stream=True))
+ assert len(exec_log) == 2
+ assert b'hello out\n' in exec_log
+ assert b'hello err\n' in exec_log
+
+ def test_exec_command_no_stream_demux(self):
+ # tty=False, stream=False, demux=True
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = self.client.exec_start(res, demux=True)
+ assert exec_log == (b'hello out\n', b'hello err\n')
+
+ def test_exec_command_stream_demux(self):
+ # tty=False, stream=True, demux=True
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = list(self.client.exec_start(res, demux=True, stream=True))
+ assert len(exec_log) == 2
+ assert (b'hello out\n', None) in exec_log
+ assert (None, b'hello err\n') in exec_log
+
+ def test_exec_command_tty_no_stream_no_demux(self):
+ # tty=True, stream=False, demux=False
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'hello out\r\nhello err\r\n'
+
+ def test_exec_command_tty_stream_no_demux(self):
+ # tty=True, stream=True, demux=False
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = list(self.client.exec_start(res, stream=True))
+ assert b'hello out\r\n' in exec_log
+ if len(exec_log) == 2:
+ assert b'hello err\r\n' in exec_log
+ else:
+ assert len(exec_log) == 3
+ assert b'hello err' in exec_log
+ assert b'\r\n' in exec_log
+
+ def test_exec_command_tty_no_stream_demux(self):
+ # tty=True, stream=False, demux=True
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = self.client.exec_start(res, demux=True)
+ assert exec_log == (b'hello out\r\nhello err\r\n', None)
+
+ def test_exec_command_tty_stream_demux(self):
+ # tty=True, stream=True, demux=True
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = list(self.client.exec_start(res, demux=True, stream=True))
+ assert (b'hello out\r\n', None) in exec_log
+ if len(exec_log) == 2:
+ assert (b'hello err\r\n', None) in exec_log
+ else:
+ assert len(exec_log) == 3
+ assert (b'hello err', None) in exec_log
+ assert (b'\r\n', None) in exec_log
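
The return type of `exec_start` now depends on two flags, which is what the class above pins down: with `demux=True` output comes back as an (stdout, stderr) tuple (or a stream of such tuples), and with `tty=True` everything is folded into stdout. A condensed sketch, assuming a reachable daemon:

    import docker

    client = docker.APIClient()
    ctnr = client.create_container('alpine:3.10', 'cat', detach=True, stdin_open=True)
    client.start(ctnr)
    res = client.exec_create(ctnr, 'sh -c "echo out; echo err >&2"')
    stdout, stderr = client.exec_start(res, demux=True)
    # stdout == b'out\n', stderr == b'err\n' (tty disabled)
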
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
index 5dbac37..c54583b 100644
--- a/tests/integration/api_healthcheck_test.py
+++ b/tests/integration/api_healthcheck_test.py
@@ -1,4 +1,4 @@
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
from .. import helpers
SECOND = 1000000000
@@ -16,7 +16,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_shell_command(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
+ TEST_IMG, 'top', healthcheck=dict(test='echo "hello world"'))
self.tmp_containers.append(container)
res = self.client.inspect_container(container)
@@ -27,7 +27,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_passes(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(
+ TEST_IMG, 'top', healthcheck=dict(
test="true",
interval=1 * SECOND,
timeout=1 * SECOND,
@@ -40,7 +40,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_fails(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(
+ TEST_IMG, 'top', healthcheck=dict(
test="false",
interval=1 * SECOND,
timeout=1 * SECOND,
@@ -53,7 +53,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.29')
def test_healthcheck_start_period(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(
+ TEST_IMG, 'top', healthcheck=dict(
test="echo 'x' >> /counter.txt && "
"test `cat /counter.txt | wc -l` -ge 3",
interval=1 * SECOND,
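
All healthcheck durations are expressed in nanoseconds, hence the SECOND = 1000000000 constant these tests multiply by. A minimal sketch of the dict form the tests use:

    import docker

    SECOND = 1000000000  # healthcheck durations are nanoseconds
    client = docker.APIClient()
    ctnr = client.create_container(
        'alpine:3.10', 'top',
        healthcheck=dict(
            test='true',          # exit code 0 => healthy
            interval=2 * SECOND,
            timeout=1 * SECOND,
            retries=1,
        ),
    )
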
diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py
index 050e7f3..2bc96ab 100644
--- a/tests/integration/api_image_test.py
+++ b/tests/integration/api_image_test.py
@@ -15,7 +15,7 @@ from six.moves import socketserver
import docker
from ..helpers import requires_api_version, requires_experimental
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
class ListImagesTest(BaseAPIIntegrationTest):
@@ -69,13 +69,15 @@ class PullImageTest(BaseAPIIntegrationTest):
with pytest.raises(docker.errors.APIError) as excinfo:
self.client.pull('hello-world', platform='foobar')
- assert excinfo.value.status_code == 500
- assert 'invalid platform' in excinfo.exconly()
+ # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -88,13 +90,13 @@ class CommitTest(BaseAPIIntegrationTest):
assert img['Container'].startswith(id)
assert 'ContainerConfig' in img
assert 'Image' in img['ContainerConfig']
- assert BUSYBOX == img['ContainerConfig']['Image']
- busybox_id = self.client.inspect_image(BUSYBOX)['Id']
+ assert TEST_IMG == img['ContainerConfig']['Image']
+ busybox_id = self.client.inspect_image(TEST_IMG)['Id']
assert 'Parent' in img
assert img['Parent'] == busybox_id
def test_commit_with_changes(self):
- cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ cid = self.client.create_container(TEST_IMG, ['touch', '/test'])
self.tmp_containers.append(cid)
self.client.start(cid)
img_id = self.client.commit(
@@ -110,7 +112,7 @@ class CommitTest(BaseAPIIntegrationTest):
class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -317,7 +319,7 @@ class PruneImagesTest(BaseAPIIntegrationTest):
pass
# Ensure the test image does not get pruned
- ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ ctnr = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.tmp_containers.append(ctnr)
self.client.pull('hello-world', tag='latest')
@@ -341,7 +343,7 @@ class SaveLoadImagesTest(BaseAPIIntegrationTest):
@requires_api_version('1.23')
def test_get_image_load_image(self):
with tempfile.TemporaryFile() as f:
- stream = self.client.get_image(BUSYBOX)
+ stream = self.client.get_image(TEST_IMG)
for chunk in stream:
f.write(chunk)
@@ -349,7 +351,7 @@ class SaveLoadImagesTest(BaseAPIIntegrationTest):
result = self.client.load_image(f.read())
success = False
- result_line = 'Loaded image: {}\n'.format(BUSYBOX)
+ result_line = 'Loaded image: {}\n'.format(TEST_IMG)
for data in result:
print(data)
if 'stream' in data:
diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py
index b6726d0..0f26827 100644
--- a/tests/integration/api_network_test.py
+++ b/tests/integration/api_network_test.py
@@ -3,13 +3,13 @@ from docker.types import IPAMConfig, IPAMPool
import pytest
from ..helpers import random_name, requires_api_version
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
class TestNetworks(BaseAPIIntegrationTest):
def tearDown(self):
- super(TestNetworks, self).tearDown()
self.client.leave_swarm(force=True)
+ super(TestNetworks, self).tearDown()
def create_network(self, *args, **kwargs):
net_name = random_name()
@@ -92,7 +92,7 @@ class TestNetworks(BaseAPIIntegrationTest):
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -119,7 +119,7 @@ class TestNetworks(BaseAPIIntegrationTest):
def test_connect_and_force_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -144,7 +144,7 @@ class TestNetworks(BaseAPIIntegrationTest):
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -161,7 +161,7 @@ class TestNetworks(BaseAPIIntegrationTest):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image=BUSYBOX,
+ image=TEST_IMG,
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
@@ -181,7 +181,7 @@ class TestNetworks(BaseAPIIntegrationTest):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image=BUSYBOX,
+ image=TEST_IMG,
command='top',
host_config=self.client.create_host_config(
network_mode=net_name,
@@ -211,7 +211,7 @@ class TestNetworks(BaseAPIIntegrationTest):
),
)
container = self.client.create_container(
- image=BUSYBOX, command='top',
+ image=TEST_IMG, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -237,7 +237,7 @@ class TestNetworks(BaseAPIIntegrationTest):
),
)
container = self.client.create_container(
- image=BUSYBOX, command='top',
+ image=TEST_IMG, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -257,7 +257,7 @@ class TestNetworks(BaseAPIIntegrationTest):
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
container = self.client.create_container(
- BUSYBOX, 'top',
+ TEST_IMG, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py
index 1150b09..38f9d12 100644
--- a/tests/integration/api_plugin_test.py
+++ b/tests/integration/api_plugin_test.py
@@ -3,7 +3,7 @@ import os
import docker
import pytest
-from .base import BaseAPIIntegrationTest, TEST_API_VERSION
+from .base import BaseAPIIntegrationTest
from ..helpers import requires_api_version
SSHFS = 'vieux/sshfs:latest'
@@ -13,27 +13,27 @@ SSHFS = 'vieux/sshfs:latest'
class PluginTest(BaseAPIIntegrationTest):
@classmethod
def teardown_class(cls):
- c = docker.APIClient(
- version=TEST_API_VERSION, timeout=60,
- **docker.utils.kwargs_from_env()
- )
+ client = cls.get_client_instance()
try:
- c.remove_plugin(SSHFS, force=True)
+ client.remove_plugin(SSHFS, force=True)
except docker.errors.APIError:
pass
def teardown_method(self, method):
+ client = self.get_client_instance()
try:
- self.client.disable_plugin(SSHFS)
+ client.disable_plugin(SSHFS)
except docker.errors.APIError:
pass
for p in self.tmp_plugins:
try:
- self.client.remove_plugin(p, force=True)
+ client.remove_plugin(p, force=True)
except docker.errors.APIError:
pass
+ client.close()
+
def ensure_plugin_installed(self, plugin_name):
try:
return self.client.inspect_plugin(plugin_name)
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
index 85f9dcc..b6b7ec5 100644
--- a/tests/integration/api_service_test.py
+++ b/tests/integration/api_service_test.py
@@ -10,7 +10,7 @@ import six
from ..helpers import (
force_leave_swarm, requires_api_version, requires_experimental
)
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
class ServiceTest(BaseAPIIntegrationTest):
@@ -60,7 +60,7 @@ class ServiceTest(BaseAPIIntegrationTest):
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
return name, self.client.create_service(
@@ -156,7 +156,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_custom_log_driver(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
log_cfg = docker.types.DriverConfig('none')
task_tmpl = docker.types.TaskTemplate(
@@ -174,7 +174,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_volume_mount(self):
vol_name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['ls'],
+ TEST_IMG, ['ls'],
mounts=[
docker.types.Mount(target='/test', source=vol_name)
]
@@ -194,7 +194,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert mount['Type'] == 'volume'
def test_create_service_with_resources_constraints(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
resources = docker.types.Resources(
cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
@@ -214,7 +214,7 @@ class ServiceTest(BaseAPIIntegrationTest):
]
def _create_service_with_generic_resources(self, generic_resources):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
resources = docker.types.Resources(
generic_resources=generic_resources
@@ -265,7 +265,7 @@ class ServiceTest(BaseAPIIntegrationTest):
self._create_service_with_generic_resources(test_input)
def test_create_service_with_update_config(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
@@ -281,6 +281,20 @@ class ServiceTest(BaseAPIIntegrationTest):
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
+ @requires_api_version('1.28')
+ def test_create_service_with_failure_action_rollback(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(failure_action='rollback')
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
@requires_api_version('1.25')
def test_create_service_with_update_config_monitor(self):
container_spec = docker.types.ContainerSpec('busybox', ['true'])
@@ -298,8 +312,29 @@ class ServiceTest(BaseAPIIntegrationTest):
assert update_config['Monitor'] == uc['Monitor']
assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
+ @requires_api_version('1.28')
+ def test_create_service_with_rollback_config(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ rollback_cfg = docker.types.RollbackConfig(
+ parallelism=10, delay=5, failure_action='pause',
+ monitor=300000000, max_failure_ratio=0.4
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, rollback_config=rollback_cfg, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'RollbackConfig' in svc_info['Spec']
+ rc = svc_info['Spec']['RollbackConfig']
+ assert rollback_cfg['Parallelism'] == rc['Parallelism']
+ assert rollback_cfg['Delay'] == rc['Delay']
+ assert rollback_cfg['FailureAction'] == rc['FailureAction']
+ assert rollback_cfg['Monitor'] == rc['Monitor']
+ assert rollback_cfg['MaxFailureRatio'] == rc['MaxFailureRatio']
+
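
RollbackConfig (API >= 1.28) takes the same parameters as UpdateConfig but governs automatic rollbacks rather than rolling updates. A minimal sketch mirroring the new test:

    import docker

    client = docker.APIClient()
    spec = docker.types.ContainerSpec('alpine:3.10', ['true'])
    rollback = docker.types.RollbackConfig(
        parallelism=2, delay=5, failure_action='pause',
        monitor=300000000, max_failure_ratio=0.1,
    )
    client.create_service(
        docker.types.TaskTemplate(spec), name='svc', rollback_config=rollback,
    )
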
def test_create_service_with_restart_policy(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
policy = docker.types.RestartPolicy(
docker.types.RestartPolicy.condition_types.ANY,
delay=5, max_attempts=5
@@ -322,7 +357,7 @@ class ServiceTest(BaseAPIIntegrationTest):
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
@@ -336,9 +371,38 @@ class ServiceTest(BaseAPIIntegrationTest):
{'Target': net1['Id']}, {'Target': net2['Id']}
]
+ def test_create_service_with_network_attachment_config(self):
+ network = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(network['Id'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ network_config = docker.types.NetworkAttachmentConfig(
+ target='dockerpytest_1',
+ aliases=['dockerpytest_1_alias'],
+ options={
+ 'foo': 'bar'
+ }
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ networks=[network_config]
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ service_networks_info = svc_info['Spec']['TaskTemplate']['Networks']
+ assert len(service_networks_info) == 1
+ assert service_networks_info[0]['Target'] == network['Id']
+ assert service_networks_info[0]['Aliases'] == ['dockerpytest_1_alias']
+ assert service_networks_info[0]['DriverOpts'] == {'foo': 'bar'}
+
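
NetworkAttachmentConfig wraps the per-network settings that previously had to be passed as raw {'Target': ...} dicts, adding aliases and driver options. A minimal sketch, assuming an existing overlay network named 'mynet':

    import docker

    client = docker.APIClient()
    spec = docker.types.ContainerSpec('alpine:3.10', ['true'])
    net_cfg = docker.types.NetworkAttachmentConfig(
        target='mynet', aliases=['db'], options={'foo': 'bar'},
    )
    task = docker.types.TaskTemplate(spec, networks=[net_cfg])
    client.create_service(task, name='svc')
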
def test_create_service_with_placement(self):
node_id = self.client.nodes()[0]['ID']
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=['node.id=={}'.format(node_id)]
)
@@ -351,7 +415,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_placement_object(self):
node_id = self.client.nodes()[0]['ID']
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(
constraints=['node.id=={}'.format(node_id)]
)
@@ -366,7 +430,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.30')
def test_create_service_with_placement_platform(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
@@ -379,7 +443,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.27')
def test_create_service_with_placement_preferences(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(preferences=[
{'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
])
@@ -392,8 +456,23 @@ class ServiceTest(BaseAPIIntegrationTest):
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+ @requires_api_version('1.27')
+ def test_create_service_with_placement_preferences_tuple(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ placemt = docker.types.Placement(preferences=(
+ ('spread', 'com.dockerpy.test'),
+ ))
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
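
Placement preferences may now be given as ('strategy', descriptor) tuples, which are normalized at construction into the {'Spread': {'SpreadDescriptor': ...}} mapping the API expects; that is why the test can compare the inspected value against the Placement object directly. A minimal sketch:

    import docker

    placement = docker.types.Placement(
        constraints=['node.role == manager'],
        preferences=[('spread', 'node.labels.zone')],  # tuple form
    )
    task = docker.types.TaskTemplate(
        docker.types.ContainerSpec('alpine:3.10', ['true']), placement=placement,
    )
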
def test_create_service_with_endpoint_spec(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -423,7 +502,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.32')
def test_create_service_with_endpoint_spec_host_publish_mode(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -443,7 +522,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_env(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
+ TEST_IMG, ['true'], env={'DOCKER_PY_TEST': 1}
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
@@ -459,7 +538,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.29')
def test_create_service_with_update_order(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, order='start-first'
@@ -478,7 +557,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_service_with_tty(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['true'], tty=True
+ TEST_IMG, ['true'], tty=True
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
@@ -495,7 +574,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_service_with_tty_dict(self):
container_spec = {
- 'Image': BUSYBOX,
+ 'Image': TEST_IMG,
'Command': ['true'],
'TTY': True
}
@@ -511,7 +590,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_global_mode(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -524,7 +603,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_replicated_mode(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -717,7 +796,7 @@ class ServiceTest(BaseAPIIntegrationTest):
search=['local'], options=['debug']
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], dns_config=dns_config
+ TEST_IMG, ['sleep', '999'], dns_config=dns_config
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -737,7 +816,7 @@ class ServiceTest(BaseAPIIntegrationTest):
start_period=3 * second, interval=int(second / 2),
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], healthcheck=hc
+ TEST_IMG, ['sleep', '999'], healthcheck=hc
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -754,7 +833,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.28')
def test_create_service_with_readonly(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], read_only=True
+ TEST_IMG, ['sleep', '999'], read_only=True
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -768,7 +847,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.28')
def test_create_service_with_stop_signal(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], stop_signal='SIGINT'
+ TEST_IMG, ['sleep', '999'], stop_signal='SIGINT'
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -786,7 +865,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_privileges(self):
priv = docker.types.Privileges(selinux_disable=True)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], privileges=priv
+ TEST_IMG, ['sleep', '999'], privileges=priv
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -800,6 +879,20 @@ class ServiceTest(BaseAPIIntegrationTest):
)
assert privileges['SELinuxContext']['Disable'] is True
+ @requires_api_version('1.38')
+ def test_create_service_with_init(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], init=True
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Init' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Init'] is True
+ )
+
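
`init=True` (API >= 1.38) runs an init process as PID 1 inside each service task, the service-mode counterpart of the `init` option on plain containers. A minimal sketch:

    import docker

    spec = docker.types.ContainerSpec(
        'alpine:3.10', ['sleep', '999'], init=True,  # requires API >= 1.38
    )
    task = docker.types.TaskTemplate(spec)
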
@requires_api_version('1.25')
def test_update_service_with_defaults_name(self):
container_spec = docker.types.ContainerSpec(
@@ -928,7 +1021,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert labels['container.label'] == 'SampleLabel'
def test_update_service_with_defaults_update_config(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
@@ -967,7 +1060,7 @@ class ServiceTest(BaseAPIIntegrationTest):
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
@@ -1006,7 +1099,7 @@ class ServiceTest(BaseAPIIntegrationTest):
]
def test_update_service_with_defaults_endpoint_spec(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -1070,7 +1163,7 @@ class ServiceTest(BaseAPIIntegrationTest):
start_period=3 * second, interval=int(second / 2),
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], healthcheck=hc
+ TEST_IMG, ['sleep', '999'], healthcheck=hc
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -1085,7 +1178,7 @@ class ServiceTest(BaseAPIIntegrationTest):
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], healthcheck={}
+ TEST_IMG, ['sleep', '999'], healthcheck={}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
index dbf3786..f1cbc26 100644
--- a/tests/integration/api_swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -13,14 +13,13 @@ class SwarmTest(BaseAPIIntegrationTest):
self._unlock_key = None
def tearDown(self):
- super(SwarmTest, self).tearDown()
try:
if self._unlock_key:
self.client.unlock_swarm(self._unlock_key)
except docker.errors.APIError:
pass
-
force_leave_swarm(self.client)
+ super(SwarmTest, self).tearDown()
@requires_api_version('1.24')
def test_init_swarm_simple(self):
@@ -36,6 +35,35 @@ class SwarmTest(BaseAPIIntegrationTest):
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_defaults(self):
+ assert self.init_swarm()
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
+ assert results['SubnetSize'] == 24
+
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_only_pool(self):
+ assert self.init_swarm(default_addr_pool=['2.0.0.0/16'])
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'2.0.0.0/16'}
+ assert results['SubnetSize'] == 24
+
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_only_subnet_size(self):
+ assert self.init_swarm(subnet_size=26)
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
+ assert results['SubnetSize'] == 26
+
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_both_args(self):
+ assert self.init_swarm(default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],
+ subnet_size=28)
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'2.0.0.0/16', '3.0.0.0/16'}
+ assert results['SubnetSize'] == 28
+
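
`default_addr_pool` and `subnet_size` (API >= 1.39) control the address space the swarm carves overlay-network subnets from; the daemon defaults are 10.0.0.0/8 with /24 subnets, as the first of these tests asserts. A minimal sketch:

    import docker

    client = docker.APIClient()
    client.init_swarm(
        default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],
        subnet_size=28,
    )
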
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
assert self.init_swarm()
@@ -158,12 +186,14 @@ class SwarmTest(BaseAPIIntegrationTest):
@requires_api_version('1.24')
def test_inspect_node(self):
- assert self.init_swarm()
+ node_id = self.init_swarm()
+ assert node_id
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
+ assert node_id == node['ID']
assert node['Version'] == node_data['Version']
@requires_api_version('1.24')
@@ -205,3 +235,21 @@ class SwarmTest(BaseAPIIntegrationTest):
self.client.remove_node(node_id, True)
assert e.value.response.status_code >= 400
+
+ @requires_api_version('1.25')
+ def test_rotate_manager_unlock_key(self):
+ spec = self.client.create_swarm_spec(autolock_managers=True)
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ key_1 = self.client.get_unlock_key()
+ assert self.client.update_swarm(
+ version=swarm_info['Version']['Index'],
+ rotate_manager_unlock_key=True
+ )
+ key_2 = self.client.get_unlock_key()
+ assert key_1['UnlockKey'] != key_2['UnlockKey']
+
+ @requires_api_version('1.30')
+ @pytest.mark.xfail(reason='Can fail if eth0 has multiple IP addresses')
+ def test_init_swarm_data_path_addr(self):
+ assert self.init_swarm(data_path_addr='eth0')
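
With autolocked managers, the unlock key is what a restarted manager needs before it can rejoin; rotating it invalidates the old key, which the new test verifies by comparing keys before and after. A condensed sketch of the calls involved:

    import docker

    client = docker.APIClient()
    swarm_spec = client.create_swarm_spec(autolock_managers=True)
    client.init_swarm(swarm_spec=swarm_spec)
    key = client.get_unlock_key()  # {'UnlockKey': '...'}
    version = client.inspect_swarm()['Version']['Index']
    client.update_swarm(version=version, rotate_manager_unlock_key=True)
    # A locked manager is then unlocked with the *new* key:
    # client.unlock_swarm(client.get_unlock_key())
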
diff --git a/tests/integration/base.py b/tests/integration/base.py
index 56c23ed..a7613f6 100644
--- a/tests/integration/base.py
+++ b/tests/integration/base.py
@@ -3,11 +3,10 @@ import shutil
import unittest
import docker
-from docker.utils import kwargs_from_env
-
from .. import helpers
+from docker.utils import kwargs_from_env
-BUSYBOX = 'busybox:buildroot-2014.02'
+TEST_IMG = 'alpine:3.10'
TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
@@ -29,41 +28,44 @@ class BaseIntegrationTest(unittest.TestCase):
def tearDown(self):
client = docker.from_env(version=TEST_API_VERSION)
- for img in self.tmp_imgs:
- try:
- client.api.remove_image(img)
- except docker.errors.APIError:
- pass
- for container in self.tmp_containers:
- try:
- client.api.remove_container(container, force=True, v=True)
- except docker.errors.APIError:
- pass
- for network in self.tmp_networks:
- try:
- client.api.remove_network(network)
- except docker.errors.APIError:
- pass
- for volume in self.tmp_volumes:
- try:
- client.api.remove_volume(volume)
- except docker.errors.APIError:
- pass
-
- for secret in self.tmp_secrets:
- try:
- client.api.remove_secret(secret)
- except docker.errors.APIError:
- pass
-
- for config in self.tmp_configs:
- try:
- client.api.remove_config(config)
- except docker.errors.APIError:
- pass
-
- for folder in self.tmp_folders:
- shutil.rmtree(folder)
+ try:
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True, v=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+ finally:
+ client.close()
class BaseAPIIntegrationTest(BaseIntegrationTest):
@@ -106,7 +108,7 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
return container
- def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
+ def create_and_start(self, image=TEST_IMG, command='top', **kwargs):
container = self.client.create_container(
image=image, command=command, **kwargs)
self.tmp_containers.append(container)
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 4e8d268..ec48835 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -7,7 +7,7 @@ import docker.errors
from docker.utils import kwargs_from_env
import pytest
-from .base import BUSYBOX
+from .base import TEST_IMG
@pytest.fixture(autouse=True, scope='session')
@@ -15,15 +15,15 @@ def setup_test_session():
warnings.simplefilter('error')
c = docker.APIClient(version='auto', **kwargs_from_env())
try:
- c.inspect_image(BUSYBOX)
+ c.inspect_image(TEST_IMG)
except docker.errors.NotFound:
- print("\npulling {0}".format(BUSYBOX), file=sys.stderr)
- for data in c.pull(BUSYBOX, stream=True, decode=True):
+ print("\npulling {0}".format(TEST_IMG), file=sys.stderr)
+ for data in c.pull(TEST_IMG, stream=True, decode=True):
status = data.get("status")
progress = data.get("progress")
detail = "{0} - {1}".format(status, progress)
print(detail, file=sys.stderr)
        # Double make sure we now have the test image
- c.inspect_image(BUSYBOX)
+ c.inspect_image(TEST_IMG)
c.close()
diff --git a/tests/integration/credentials/__init__.py b/tests/integration/credentials/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/integration/credentials/__init__.py
diff --git a/tests/integration/credentials/store_test.py b/tests/integration/credentials/store_test.py
new file mode 100644
index 0000000..dd543e2
--- /dev/null
+++ b/tests/integration/credentials/store_test.py
@@ -0,0 +1,87 @@
+import os
+import random
+import sys
+
+import pytest
+import six
+from distutils.spawn import find_executable
+
+from docker.credentials import (
+ CredentialsNotFound, Store, StoreError, DEFAULT_LINUX_STORE,
+ DEFAULT_OSX_STORE
+)
+
+
+class TestStore(object):
+ def teardown_method(self):
+ for server in self.tmp_keys:
+ try:
+ self.store.erase(server)
+ except StoreError:
+ pass
+
+ def setup_method(self):
+ self.tmp_keys = []
+ if sys.platform.startswith('linux'):
+ if find_executable('docker-credential-' + DEFAULT_LINUX_STORE):
+ self.store = Store(DEFAULT_LINUX_STORE)
+ elif find_executable('docker-credential-pass'):
+ self.store = Store('pass')
+ else:
+ raise Exception('No supported docker-credential store in PATH')
+ elif sys.platform.startswith('darwin'):
+ self.store = Store(DEFAULT_OSX_STORE)
+
+ def get_random_servername(self):
+ res = 'pycreds_test_{:x}'.format(random.getrandbits(32))
+ self.tmp_keys.append(res)
+ return res
+
+ def test_store_and_get(self):
+ key = self.get_random_servername()
+ self.store.store(server=key, username='user', secret='pass')
+ data = self.store.get(key)
+ assert data == {
+ 'ServerURL': key,
+ 'Username': 'user',
+ 'Secret': 'pass'
+ }
+
+ def test_get_nonexistent(self):
+ key = self.get_random_servername()
+ with pytest.raises(CredentialsNotFound):
+ self.store.get(key)
+
+ def test_store_and_erase(self):
+ key = self.get_random_servername()
+ self.store.store(server=key, username='user', secret='pass')
+ self.store.erase(key)
+ with pytest.raises(CredentialsNotFound):
+ self.store.get(key)
+
+ def test_unicode_strings(self):
+ key = self.get_random_servername()
+ key = six.u(key)
+ self.store.store(server=key, username='user', secret='pass')
+ data = self.store.get(key)
+ assert data
+ self.store.erase(key)
+ with pytest.raises(CredentialsNotFound):
+ self.store.get(key)
+
+ def test_list(self):
+ names = (self.get_random_servername(), self.get_random_servername())
+ self.store.store(names[0], username='sakuya', secret='izayoi')
+ self.store.store(names[1], username='reimu', secret='hakurei')
+ data = self.store.list()
+ assert names[0] in data
+ assert data[names[0]] == 'sakuya'
+ assert names[1] in data
+ assert data[names[1]] == 'reimu'
+
+ def test_execute_with_env_override(self):
+ self.store.exe = 'env'
+ self.store.environment = {'FOO': 'bar'}
+ data = self.store._execute('--null', '')
+ assert b'\0FOO=bar\0' in data
+ assert 'FOO' not in os.environ
diff --git a/tests/integration/credentials/utils_test.py b/tests/integration/credentials/utils_test.py
new file mode 100644
index 0000000..ad55f32
--- /dev/null
+++ b/tests/integration/credentials/utils_test.py
@@ -0,0 +1,22 @@
+import os
+
+from docker.credentials.utils import create_environment_dict
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+@mock.patch.dict(os.environ)
+def test_create_environment_dict():
+ base = {'FOO': 'bar', 'BAZ': 'foobar'}
+ os.environ = base
+ assert create_environment_dict({'FOO': 'baz'}) == {
+ 'FOO': 'baz', 'BAZ': 'foobar',
+ }
+ assert create_environment_dict({'HELLO': 'world'}) == {
+ 'FOO': 'bar', 'BAZ': 'foobar', 'HELLO': 'world',
+ }
+
+ assert os.environ == base
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
index ac74d72..7bf156a 100644
--- a/tests/integration/errors_test.py
+++ b/tests/integration/errors_test.py
@@ -1,11 +1,11 @@
from docker.errors import APIError
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class ErrorsTest(BaseAPIIntegrationTest):
def test_api_error_parses_json(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '10'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '10'])
self.client.start(container['Id'])
with pytest.raises(APIError) as cm:
self.client.remove_container(container['Id'])
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
index ab41ea5..eac4c97 100644
--- a/tests/integration/models_containers_test.py
+++ b/tests/integration/models_containers_test.py
@@ -1,10 +1,14 @@
+import os
import tempfile
import threading
-import docker
import pytest
-from .base import BaseIntegrationTest, TEST_API_VERSION
-from ..helpers import random_name, requires_api_version
+
+import docker
+from ..helpers import random_name
+from ..helpers import requires_api_version
+from .base import BaseIntegrationTest
+from .base import TEST_API_VERSION
class ContainerCollectionTest(BaseIntegrationTest):
@@ -122,7 +126,9 @@ class ContainerCollectionTest(BaseIntegrationTest):
def test_run_with_auto_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
- 'alpine', 'echo hello', auto_remove=True
+ # sleep(2) to allow any communication with the container
+ # before it gets removed by the host.
+ 'alpine', 'sh -c "echo hello && sleep 2"', auto_remove=True
)
assert out == b'hello\n'
@@ -131,7 +137,10 @@ class ContainerCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.ContainerError) as e:
client.containers.run(
- 'alpine', 'sh -c ">&2 echo error && exit 1"', auto_remove=True
+ # sleep(2) to allow any communication with the container
+ # before it gets removed by the host.
+ 'alpine', 'sh -c ">&2 echo error && sleep 2 && exit 1"',
+ auto_remove=True
)
assert e.value.exit_status == 1
assert e.value.stderr is None
@@ -146,6 +155,8 @@ class ContainerCollectionTest(BaseIntegrationTest):
assert logs[1] == b'world\n'
@pytest.mark.timeout(5)
+ @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+ reason='No cancellable streams over SSH')
def test_run_with_streamed_logs_and_cancel(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
@@ -160,6 +171,17 @@ class ContainerCollectionTest(BaseIntegrationTest):
assert logs[0] == b'hello\n'
assert logs[1] == b'world\n'
+ def test_run_with_proxy_config(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.api._proxy_configs = docker.utils.proxy.ProxyConfig(
+ ftp='sakuya.jp:4967'
+ )
+
+ out = client.containers.run('alpine', 'sh -c "env"')
+
+ assert b'FTP_PROXY=sakuya.jp:4967\n' in out
+ assert b'ftp_proxy=sakuya.jp:4967\n' in out
+
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
@@ -325,6 +347,66 @@ class ContainerTest(BaseIntegrationTest):
'memory_stats', 'blkio_stats']:
assert key in stats
+ def test_ports_target_none(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ ports = None
+ target_ports = {'2222/tcp': ports}
+ container = client.containers.run(
+ "alpine", "sleep 100", detach=True,
+ ports=target_ports
+ )
+ self.tmp_containers.append(container.id)
+ container.reload() # required to get auto-assigned ports
+ actual_ports = container.ports
+ assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+ for target_client, target_host in target_ports.items():
+ for actual_port in actual_ports[target_client]:
+ actual_keys = sorted(actual_port.keys())
+ assert sorted(['HostIp', 'HostPort']) == actual_keys
+ assert target_host is ports
+ assert int(actual_port['HostPort']) > 0
+ client.close()
+
+ def test_ports_target_tuple(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ ports = ('127.0.0.1', 1111)
+ target_ports = {'2222/tcp': ports}
+ container = client.containers.run(
+ "alpine", "sleep 100", detach=True,
+ ports=target_ports
+ )
+ self.tmp_containers.append(container.id)
+ container.reload() # required to get auto-assigned ports
+ actual_ports = container.ports
+ assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+ for target_client, target_host in target_ports.items():
+ for actual_port in actual_ports[target_client]:
+ actual_keys = sorted(actual_port.keys())
+ assert sorted(['HostIp', 'HostPort']) == actual_keys
+ assert target_host == ports
+ assert int(actual_port['HostPort']) > 0
+ client.close()
+
+ def test_ports_target_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ ports = [1234, 4567]
+ target_ports = {'2222/tcp': ports}
+ container = client.containers.run(
+ "alpine", "sleep 100", detach=True,
+ ports=target_ports
+ )
+ self.tmp_containers.append(container.id)
+ container.reload() # required to get auto-assigned ports
+ actual_ports = container.ports
+ assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+ for target_client, target_host in target_ports.items():
+ for actual_port in actual_ports[target_client]:
+ actual_keys = sorted(actual_port.keys())
+ assert sorted(['HostIp', 'HostPort']) == actual_keys
+ assert target_host == ports
+ assert int(actual_port['HostPort']) > 0
+ client.close()
+
def test_stop(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "top", detach=True)
@@ -362,3 +444,13 @@ class ContainerTest(BaseIntegrationTest):
detach=True)
self.tmp_containers.append(container.id)
assert container.wait()['StatusCode'] == 1
+
+ def test_create_with_volume_driver(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.create(
+ 'alpine',
+ 'sleep 300',
+ volume_driver='foo'
+ )
+ self.tmp_containers.append(container.id)
+ assert container.attrs['HostConfig']['VolumeDriver'] == 'foo'
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
index ae735ba..375d972 100644
--- a/tests/integration/models_images_test.py
+++ b/tests/integration/models_images_test.py
@@ -4,7 +4,8 @@ import tempfile
import docker
import pytest
-from .base import BaseIntegrationTest, BUSYBOX, TEST_API_VERSION
+from .base import BaseIntegrationTest, TEST_IMG, TEST_API_VERSION
+from ..helpers import random_name
class ImageCollectionTest(BaseIntegrationTest):
@@ -71,8 +72,8 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_pull_with_tag(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.pull('alpine', tag='3.3')
- assert 'alpine:3.3' in image.attrs['RepoTags']
+ image = client.images.pull('alpine', tag='3.10')
+ assert 'alpine:3.10' in image.attrs['RepoTags']
def test_pull_with_sha(self):
image_ref = (
@@ -96,7 +97,7 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_save_and_load(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.get(BUSYBOX)
+ image = client.images.get(TEST_IMG)
with tempfile.TemporaryFile() as f:
stream = image.save()
for chunk in stream:
@@ -108,6 +109,32 @@ class ImageCollectionTest(BaseIntegrationTest):
assert len(result) == 1
assert result[0].id == image.id
+ def test_save_and_load_repo_name(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(TEST_IMG)
+ additional_tag = random_name()
+ image.tag(additional_tag)
+ self.tmp_imgs.append(additional_tag)
+ image.reload()
+ with tempfile.TemporaryFile() as f:
+ stream = image.save(named='{}:latest'.format(additional_tag))
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ client.images.remove(additional_tag, force=True)
+ result = client.images.load(f.read())
+
+ assert len(result) == 1
+ assert result[0].id == image.id
+ assert '{}:latest'.format(additional_tag) in result[0].tags
+
+ def test_save_name_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(TEST_IMG)
+ with pytest.raises(docker.errors.InvalidArgument):
+ image.save(named='sakuya/izayoi')
+
class ImageTest(BaseIntegrationTest):
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
index f39f0d3..6c1836d 100644
--- a/tests/integration/models_swarm_test.py
+++ b/tests/integration/models_swarm_test.py
@@ -31,3 +31,15 @@ class SwarmTest(unittest.TestCase):
cm.value.response.status_code == 406 or
cm.value.response.status_code == 503
)
+
+ def test_join_on_already_joined_swarm(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init()
+ join_token = client.swarm.attrs['JoinTokens']['Manager']
+ with pytest.raises(docker.errors.APIError) as cm:
+ client.swarm.join(
+ remote_addrs=['127.0.0.1'],
+ join_token=join_token,
+ )
+ assert cm.value.response.status_code == 503
+ assert 'This node is already part of a swarm.' in cm.value.explanation
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index 0fd4e43..a63883c 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -4,7 +4,7 @@ import random
import docker
import six
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
@@ -14,12 +14,12 @@ class TestRegressions(BaseAPIIntegrationTest):
with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
- assert exc.value.response.status_code == 500
+ assert exc.value.is_error()
dfile.close()
def test_542_truncate_ids_client_side(self):
self.client.start(
- self.client.create_container(BUSYBOX, ['true'])
+ self.client.create_container(TEST_IMG, ['true'])
)
result = self.client.containers(all=True, trunc=True)
assert len(result[0]['Id']) == 12
@@ -30,12 +30,12 @@ class TestRegressions(BaseAPIIntegrationTest):
def test_649_handle_timeout_value_none(self):
self.client.timeout = None
- ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
+ ctnr = self.client.create_container(TEST_IMG, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715_handle_user_param_as_int_value(self):
- ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
+ ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
@@ -47,7 +47,7 @@ class TestRegressions(BaseAPIIntegrationTest):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
- BUSYBOX, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
+ TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
diff --git a/tests/unit/api_build_test.py b/tests/unit/api_build_test.py
index a7f34fd..7e07a26 100644
--- a/tests/unit/api_build_test.py
+++ b/tests/unit/api_build_test.py
@@ -1,12 +1,16 @@
import gzip
import io
+import shutil
import docker
from docker import auth
+from docker.api.build import process_dockerfile
-from .api_test import BaseAPIClientTest, fake_request, url_prefix
import pytest
+from ..helpers import make_tree
+from .api_test import BaseAPIClientTest, fake_request, url_prefix
+
class BuildTest(BaseAPIClientTest):
def test_build_container(self):
@@ -61,7 +65,7 @@ class BuildTest(BaseAPIClientTest):
)
def test_build_remote_with_registry_auth(self):
- self.client._auth_configs = {
+ self.client._auth_configs = auth.AuthConfig({
'auths': {
'https://example.com': {
'user': 'example',
@@ -69,7 +73,7 @@ class BuildTest(BaseAPIClientTest):
'email': 'example@example.com'
}
}
- }
+ })
expected_params = {'t': None, 'q': False, 'dockerfile': None,
'rm': False, 'nocache': False, 'pull': False,
@@ -77,7 +81,7 @@ class BuildTest(BaseAPIClientTest):
'remote': 'https://github.com/docker-library/mongo'}
expected_headers = {
'X-Registry-Config': auth.encode_header(
- self.client._auth_configs['auths']
+ self.client._auth_configs.auths
)
}
@@ -111,7 +115,7 @@ class BuildTest(BaseAPIClientTest):
})
def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
- self.client._auth_configs = {
+ self.client._auth_configs = auth.AuthConfig({
'auths': {
'https://example.com': {
'user': 'example',
@@ -119,12 +123,12 @@ class BuildTest(BaseAPIClientTest):
'email': 'example@example.com'
}
}
- }
+ })
headers = {}
expected_headers = {
'X-Registry-Config': auth.encode_header(
- self.client._auth_configs['auths']
+ self.client._auth_configs.auths
)
}
@@ -132,7 +136,7 @@ class BuildTest(BaseAPIClientTest):
assert headers == expected_headers
def test_set_auth_headers_with_dict_and_auth_configs(self):
- self.client._auth_configs = {
+ self.client._auth_configs = auth.AuthConfig({
'auths': {
'https://example.com': {
'user': 'example',
@@ -140,12 +144,12 @@ class BuildTest(BaseAPIClientTest):
'email': 'example@example.com'
}
}
- }
+ })
headers = {'foo': 'bar'}
expected_headers = {
'X-Registry-Config': auth.encode_header(
- self.client._auth_configs['auths']
+ self.client._auth_configs.auths
),
'foo': 'bar'
}
@@ -161,3 +165,61 @@ class BuildTest(BaseAPIClientTest):
self.client._set_auth_headers(headers)
assert headers == expected_headers
+
+ @pytest.mark.skipif(
+ not docker.constants.IS_WINDOWS_PLATFORM,
+ reason='Windows-specific syntax')
+ def test_process_dockerfile_win_longpath_prefix(self):
+ dirs = [
+ 'foo', 'foo/bar', 'baz',
+ ]
+
+ files = [
+ 'Dockerfile', 'foo/Dockerfile.foo', 'foo/bar/Dockerfile.bar',
+ 'baz/Dockerfile.baz',
+ ]
+
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+
+ def pre(path):
+ return docker.constants.WINDOWS_LONGPATH_PREFIX + path
+
+ assert process_dockerfile(None, pre(base)) == (None, None)
+ assert process_dockerfile('Dockerfile', pre(base)) == (
+ 'Dockerfile', None
+ )
+ assert process_dockerfile('foo/Dockerfile.foo', pre(base)) == (
+ 'foo/Dockerfile.foo', None
+ )
+ assert process_dockerfile(
+ '../Dockerfile', pre(base + '\\foo')
+ )[1] is not None
+ assert process_dockerfile(
+ '../baz/Dockerfile.baz', pre(base + '/baz')
+ ) == ('../baz/Dockerfile.baz', None)
+
+ def test_process_dockerfile(self):
+ dirs = [
+ 'foo', 'foo/bar', 'baz',
+ ]
+
+ files = [
+ 'Dockerfile', 'foo/Dockerfile.foo', 'foo/bar/Dockerfile.bar',
+ 'baz/Dockerfile.baz',
+ ]
+
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+
+ assert process_dockerfile(None, base) == (None, None)
+ assert process_dockerfile('Dockerfile', base) == ('Dockerfile', None)
+ assert process_dockerfile('foo/Dockerfile.foo', base) == (
+ 'foo/Dockerfile.foo', None
+ )
+ assert process_dockerfile(
+ '../Dockerfile', base + '/foo'
+ )[1] is not None
+ assert process_dockerfile('../baz/Dockerfile.baz', base + '/baz') == (
+ '../baz/Dockerfile.baz', None
+ )
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index af2bb1c..f4d220a 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -15,6 +15,7 @@ from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
+import struct
from . import fake_api
@@ -83,7 +84,7 @@ def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
-def fake_read_from_socket(self, response, stream, tty=False):
+def fake_read_from_socket(self, response, stream, tty=False, demux=False):
return six.binary_type()
@@ -105,8 +106,6 @@ class BaseAPIClientTest(unittest.TestCase):
)
self.patcher.start()
self.client = APIClient()
- # Force-clear authconfig to avoid tampering with the tests
- self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
@@ -221,13 +220,11 @@ class DockerApiTest(BaseAPIClientTest):
'username': 'sakuya', 'password': 'izayoi'
}
assert args[1]['headers'] == {'Content-Type': 'application/json'}
- assert self.client._auth_configs['auths'] == {
- 'docker.io': {
- 'email': None,
- 'password': 'izayoi',
- 'username': 'sakuya',
- 'serveraddress': None,
- }
+ assert self.client._auth_configs.auths['docker.io'] == {
+ 'email': None,
+ 'password': 'izayoi',
+ 'username': 'sakuya',
+ 'serveraddress': None,
}
def test_events(self):
@@ -467,56 +464,124 @@ class UnixSocketStreamTest(unittest.TestCase):
class TCPSocketStreamTest(unittest.TestCase):
- text_data = b'''
+ stdout_data = b'''
Now, those children out there, they're jumping through the
flames in the hope that the god of the fire will make them fruitful.
Really, you can't blame them. After all, what girl would not prefer the
child of a god to that of some acne-scarred artisan?
'''
+ stderr_data = b'''
+ And what of the true God? To whose glory churches and monasteries have been
+ built on these islands for generations past? Now shall what of Him?
+ '''
- def setUp(self):
-
- self.server = six.moves.socketserver.ThreadingTCPServer(
- ('', 0), self.get_handler_class()
- )
- self.thread = threading.Thread(target=self.server.serve_forever)
- self.thread.setDaemon(True)
- self.thread.start()
- self.address = 'http://{}:{}'.format(
- socket.gethostname(), self.server.server_address[1]
- )
-
- def tearDown(self):
- self.server.shutdown()
- self.server.server_close()
- self.thread.join()
-
- def get_handler_class(self):
- text_data = self.text_data
+ @classmethod
+ def setup_class(cls):
+ cls.server = six.moves.socketserver.ThreadingTCPServer(
+ ('', 0), cls.get_handler_class())
+ cls.thread = threading.Thread(target=cls.server.serve_forever)
+ cls.thread.setDaemon(True)
+ cls.thread.start()
+ cls.address = 'http://{}:{}'.format(
+ socket.gethostname(), cls.server.server_address[1])
+
+ @classmethod
+ def teardown_class(cls):
+ cls.server.shutdown()
+ cls.server.server_close()
+ cls.thread.join()
+
+ @classmethod
+ def get_handler_class(cls):
+ stdout_data = cls.stdout_data
+ stderr_data = cls.stderr_data
class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
def do_POST(self):
+ resp_data = self.get_resp_data()
self.send_response(101)
self.send_header(
- 'Content-Type', 'application/vnd.docker.raw-stream'
- )
+ 'Content-Type', 'application/vnd.docker.raw-stream')
self.send_header('Connection', 'Upgrade')
self.send_header('Upgrade', 'tcp')
self.end_headers()
self.wfile.flush()
time.sleep(0.2)
- self.wfile.write(text_data)
+ self.wfile.write(resp_data)
self.wfile.flush()
+ def get_resp_data(self):
+ path = self.path.split('/')[-1]
+ if path == 'tty':
+ return stdout_data + stderr_data
+ elif path == 'no-tty':
+ data = b''
+ data += self.frame_header(1, stdout_data)
+ data += stdout_data
+ data += self.frame_header(2, stderr_data)
+ data += stderr_data
+ return data
+ else:
+ raise Exception('Unknown path {0}'.format(path))
+
+ @staticmethod
+ def frame_header(stream, data):
+ return struct.pack('>BxxxL', stream, len(data))
+
return Handler
- def test_read_from_socket(self):
+ def request(self, stream=None, tty=None, demux=None):
+ assert stream is not None and tty is not None and demux is not None
with APIClient(base_url=self.address) as client:
- resp = client._post(client._url('/dummy'), stream=True)
- data = client._read_from_socket(resp, stream=True, tty=True)
- results = b''.join(data)
-
- assert results == self.text_data
+ if tty:
+ url = client._url('/tty')
+ else:
+ url = client._url('/no-tty')
+ resp = client._post(url, stream=True)
+ return client._read_from_socket(
+ resp, stream=stream, tty=tty, demux=demux)
+
+ def test_read_from_socket_tty(self):
+ res = self.request(stream=True, tty=True, demux=False)
+ assert next(res) == self.stdout_data + self.stderr_data
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_tty_demux(self):
+ res = self.request(stream=True, tty=True, demux=True)
+ assert next(res) == (self.stdout_data + self.stderr_data, None)
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_no_tty(self):
+ res = self.request(stream=True, tty=False, demux=False)
+ assert next(res) == self.stdout_data
+ assert next(res) == self.stderr_data
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_no_tty_demux(self):
+ res = self.request(stream=True, tty=False, demux=True)
+ assert (self.stdout_data, None) == next(res)
+ assert (None, self.stderr_data) == next(res)
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_no_stream_tty(self):
+ res = self.request(stream=False, tty=True, demux=False)
+ assert res == self.stdout_data + self.stderr_data
+
+ def test_read_from_socket_no_stream_tty_demux(self):
+ res = self.request(stream=False, tty=True, demux=True)
+ assert res == (self.stdout_data + self.stderr_data, None)
+
+ def test_read_from_socket_no_stream_no_tty(self):
+ res = self.request(stream=False, tty=False, demux=False)
+ assert res == self.stdout_data + self.stderr_data
+
+ def test_read_from_socket_no_stream_no_tty_demux(self):
+ res = self.request(stream=False, tty=False, demux=True)
+ assert res == (self.stdout_data, self.stderr_data)
class UserAgentTest(unittest.TestCase):
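Note: the frame_header helper above emits the 8-byte header of Docker's multiplexed raw stream: one stream-type byte (1 = stdout, 2 = stderr), three padding bytes, then a big-endian uint32 payload length, hence struct.pack('>BxxxL', ...). A hypothetical decoder for a single frame, matching that layout, which is what the demux code paths tested above must undo on the client side:

    import struct

    def read_frame(sock_file):
        # Returns (stream_id, payload), or None once the stream closes.
        header = sock_file.read(8)
        if len(header) < 8:
            return None
        stream_id, length = struct.unpack('>BxxxL', header)
        return stream_id, sock_file.read(length)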
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 947d680..aac8910 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -9,7 +9,7 @@ import shutil
import tempfile
import unittest
-from docker import auth, errors
+from docker import auth, credentials, errors
import pytest
try:
@@ -106,13 +106,13 @@ class ResolveAuthTest(unittest.TestCase):
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
- auth_config = {
+ auth_config = auth.AuthConfig({
'auths': auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
})
- }
+ })
def test_resolve_authconfig_hostname_only(self):
assert auth.resolve_authconfig(
@@ -211,70 +211,21 @@ class ResolveAuthTest(unittest.TestCase):
) is None
def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
- auth_config = {
+ auth_config = auth.AuthConfig({
'auths': auth.parse_auth({
'https://index.docker.io/v1/': self.index_config,
}),
'credsStore': 'blackbox'
- }
- with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+ })
+ with mock.patch(
+ 'docker.auth.AuthConfig._resolve_authconfig_credstore'
+ ) as m:
m.return_value = None
assert 'indexuser' == auth.resolve_authconfig(
auth_config, None
)['username']
-class CredStoreTest(unittest.TestCase):
- def test_get_credential_store(self):
- auth_config = {
- 'credHelpers': {
- 'registry1.io': 'truesecret',
- 'registry2.io': 'powerlock'
- },
- 'credsStore': 'blackbox',
- }
-
- assert auth.get_credential_store(
- auth_config, 'registry1.io'
- ) == 'truesecret'
- assert auth.get_credential_store(
- auth_config, 'registry2.io'
- ) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'registry3.io'
- ) == 'blackbox'
-
- def test_get_credential_store_no_default(self):
- auth_config = {
- 'credHelpers': {
- 'registry1.io': 'truesecret',
- 'registry2.io': 'powerlock'
- },
- }
- assert auth.get_credential_store(
- auth_config, 'registry2.io'
- ) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'registry3.io'
- ) is None
-
- def test_get_credential_store_default_index(self):
- auth_config = {
- 'credHelpers': {
- 'https://index.docker.io/v1/': 'powerlock'
- },
- 'credsStore': 'truesecret'
- }
-
- assert auth.get_credential_store(auth_config, None) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'docker.io'
- ) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'images.io'
- ) == 'truesecret'
-
-
class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
@@ -293,8 +244,8 @@ class LoadConfigTest(unittest.TestCase):
cfg = auth.load_config(cfg_path)
assert auth.resolve_authconfig(cfg) is not None
- assert cfg['auths'][auth.INDEX_NAME] is not None
- cfg = cfg['auths'][auth.INDEX_NAME]
+ assert cfg.auths[auth.INDEX_NAME] is not None
+ cfg = cfg.auths[auth.INDEX_NAME]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == 'sakuya@scarlet.net'
@@ -312,8 +263,8 @@ class LoadConfigTest(unittest.TestCase):
)
cfg = auth.load_config(cfg_path)
assert auth.resolve_authconfig(cfg) is not None
- assert cfg['auths'][auth.INDEX_URL] is not None
- cfg = cfg['auths'][auth.INDEX_URL]
+ assert cfg.auths[auth.INDEX_URL] is not None
+ cfg = cfg.auths[auth.INDEX_URL]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == email
@@ -335,8 +286,8 @@ class LoadConfigTest(unittest.TestCase):
}, f)
cfg = auth.load_config(cfg_path)
assert auth.resolve_authconfig(cfg) is not None
- assert cfg['auths'][auth.INDEX_URL] is not None
- cfg = cfg['auths'][auth.INDEX_URL]
+ assert cfg.auths[auth.INDEX_URL] is not None
+ cfg = cfg.auths[auth.INDEX_URL]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == email
@@ -360,7 +311,7 @@ class LoadConfigTest(unittest.TestCase):
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
- cfg = auth.load_config(dockercfg_path)['auths']
+ cfg = auth.load_config(dockercfg_path).auths
assert registry in cfg
assert cfg[registry] is not None
cfg = cfg[registry]
@@ -387,7 +338,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
- cfg = auth.load_config(None)['auths']
+ cfg = auth.load_config(None).auths
assert registry in cfg
assert cfg[registry] is not None
cfg = cfg[registry]
@@ -417,8 +368,8 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg['auths']
- cfg = cfg['auths'][registry]
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == 'sakuya@scarlet.net'
@@ -446,8 +397,8 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg['auths']
- cfg = cfg['auths'][registry]
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
assert cfg['email'] == 'sakuya@scarlet.net'
@@ -464,7 +415,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {'auths': {}}
+ assert dict(cfg) == {'auths': {}}
def test_load_config_invalid_auth_dict(self):
folder = tempfile.mkdtemp()
@@ -479,7 +430,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {'auths': {'scarlet.net': {}}}
+ assert dict(cfg) == {'auths': {'scarlet.net': {}}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
@@ -500,7 +451,352 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert registry in cfg['auths']
- cfg = cfg['auths'][registry]
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
+
+
+class CredstoreTest(unittest.TestCase):
+ def setUp(self):
+ self.authconfig = auth.AuthConfig({'credsStore': 'default'})
+ self.default_store = InMemoryStore('default')
+ self.authconfig._stores['default'] = self.default_store
+ self.default_store.store(
+ 'https://gensokyo.jp/v2', 'sakuya', 'izayoi',
+ )
+ self.default_store.store(
+ 'https://default.com/v2', 'user', 'hunter2',
+ )
+
+ def test_get_credential_store(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ })
+
+ assert auth_config.get_credential_store('registry1.io') == 'truesecret'
+ assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+ assert auth_config.get_credential_store('registry3.io') == 'blackbox'
+
+ def test_get_credential_store_no_default(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ })
+ assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+ assert auth_config.get_credential_store('registry3.io') is None
+
+ def test_get_credential_store_default_index(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'https://index.docker.io/v1/': 'powerlock'
+ },
+ 'credsStore': 'truesecret'
+ })
+
+ assert auth_config.get_credential_store(None) == 'powerlock'
+ assert auth_config.get_credential_store('docker.io') == 'powerlock'
+ assert auth_config.get_credential_store('images.io') == 'truesecret'
+
+ def test_get_credential_store_with_plain_dict(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ }
+
+ assert auth.get_credential_store(
+ auth_config, 'registry1.io'
+ ) == 'truesecret'
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) == 'blackbox'
+
+ def test_get_all_credentials_credstore_only(self):
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_with_empty_credhelper(self):
+ self.authconfig['credHelpers'] = {
+ 'registry1.io': 'truesecret',
+ }
+ self.authconfig._stores['truesecret'] = InMemoryStore()
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': None,
+ }
+
+ def test_get_all_credentials_with_credhelpers_only(self):
+ del self.authconfig['credsStore']
+ assert self.authconfig.get_all_credentials() == {}
+
+ self.authconfig['credHelpers'] = {
+ 'https://gensokyo.jp/v2': 'default',
+ 'https://default.com/v2': 'default',
+ }
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_with_auths_entries(self):
+ self.authconfig.add_auth('registry1.io', {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ },
+ }
+
+ def test_get_all_credentials_with_empty_auths_entry(self):
+ self.authconfig.add_auth('default.com', {})
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_credstore_overrides_auth_entry(self):
+ self.authconfig.add_auth('default.com', {
+ 'Username': 'shouldnotsee',
+ 'Password': 'thisentry',
+ 'ServerAddress': 'https://default.com/v2',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_helpers_override_default(self):
+ self.authconfig['credHelpers'] = {
+ 'https://default.com/v2': 'truesecret',
+ }
+ truesecret = InMemoryStore('truesecret')
+ truesecret.store('https://default.com/v2', 'reimu', 'hakurei')
+ self.authconfig._stores['truesecret'] = truesecret
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_3_sources(self):
+ self.authconfig['credHelpers'] = {
+ 'registry1.io': 'truesecret',
+ }
+ truesecret = InMemoryStore('truesecret')
+ truesecret.store('registry1.io', 'reimu', 'hakurei')
+ self.authconfig._stores['truesecret'] = truesecret
+ self.authconfig.add_auth('registry2.io', {
+ 'ServerAddress': 'registry2.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ },
+ 'registry2.io': {
+ 'ServerAddress': 'registry2.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ }
+ }
+
+
+class InMemoryStore(credentials.Store):
+ def __init__(self, *args, **kwargs):
+ self.__store = {}
+
+ def get(self, server):
+ try:
+ return self.__store[server]
+ except KeyError:
+ raise credentials.errors.CredentialsNotFound()
+
+ def store(self, server, username, secret):
+ self.__store[server] = {
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret,
+ }
+
+ def list(self):
+ return dict(
+ [(k, v['Username']) for k, v in self.__store.items()]
+ )
+
+ def erase(self, server):
+ del self.__store[server]
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
index 2be0578..0689d07 100644
--- a/tests/unit/dockertypes_test.py
+++ b/tests/unit/dockertypes_test.py
@@ -14,7 +14,7 @@ from docker.types.services import convert_service_ports
try:
from unittest import mock
-except:
+except: # noqa: E722
import mock
@@ -85,6 +85,12 @@ class HostConfigTest(unittest.TestCase):
with pytest.raises(ValueError):
create_host_config(version='1.23', userns_mode='host12')
+ def test_create_host_config_with_uts(self):
+ config = create_host_config(version='1.15', uts_mode='host')
+ assert config.get('UTSMode') == 'host'
+ with pytest.raises(ValueError):
+ create_host_config(version='1.15', uts_mode='host12')
+
def test_create_host_config_with_oom_score_adj(self):
config = create_host_config(version='1.22', oom_score_adj=100)
assert config.get('OomScoreAdj') == 100
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
index e27a9b1..2134f86 100644
--- a/tests/unit/errors_test.py
+++ b/tests/unit/errors_test.py
@@ -79,6 +79,27 @@ class APIErrorTest(unittest.TestCase):
err = APIError('', response=resp)
assert err.is_client_error() is True
+ def test_is_error_300(self):
+ """Report no error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_error() is False
+
+ def test_is_error_400(self):
+ """Report error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_error() is True
+
+ def test_is_error_500(self):
+ """Report error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_error() is True
+
def test_create_error_from_exception(self):
resp = requests.Response()
resp.status_code = 500
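Note: the three tests above pin down the new APIError.is_error() predicate as the union of is_client_error() and is_server_error(), true for any 4xx or 5xx status; the regression test earlier in this patch switches to it so a build that fails with 400 rather than 500 still counts. A one-line illustration:

    import requests
    from docker.errors import APIError

    resp = requests.Response()
    resp.status_code = 404
    assert APIError('', response=resp).is_error()  # any 4xx/5xx counts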
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index 48a5288..da5f0ab 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -95,6 +95,7 @@ class ContainerCollectionTest(unittest.TestCase):
ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
user='bob',
userns_mode='host',
+ uts_mode='host',
version='1.23',
volume_driver='some_driver',
volumes=[
@@ -174,6 +175,8 @@ class ContainerCollectionTest(unittest.TestCase):
'Tmpfs': {'/blah': ''},
'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
'UsernsMode': 'host',
+ 'UTSMode': 'host',
+ 'VolumeDriver': 'some_driver',
'VolumesFrom': ['container'],
},
healthcheck={'test': 'true'},
@@ -188,7 +191,6 @@ class ContainerCollectionTest(unittest.TestCase):
stop_signal=9,
tty=True,
user='bob',
- volume_driver='some_driver',
volumes=[
'/mnt/vol2',
'/mnt/vol1',
@@ -230,7 +232,9 @@ class ContainerCollectionTest(unittest.TestCase):
container = client.containers.run('alpine', 'sleep 300', detach=True)
assert container.id == FAKE_CONTAINER_ID
- client.api.pull.assert_called_with('alpine', platform=None, tag=None)
+ client.api.pull.assert_called_with(
+ 'alpine', platform=None, tag=None, stream=True
+ )
def test_run_with_error(self):
client = make_fake_client()
@@ -412,10 +416,11 @@ class ContainerTest(unittest.TestCase):
client.api.exec_create.assert_called_with(
FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
stdin=False, tty=False, privileged=True, user='', environment=None,
- workdir=None
+ workdir=None,
)
client.api.exec_start.assert_called_with(
- FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+ FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False,
+ demux=False,
)
def test_exec_run_failure(self):
@@ -425,10 +430,11 @@ class ContainerTest(unittest.TestCase):
client.api.exec_create.assert_called_with(
FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True,
stdin=False, tty=False, privileged=True, user='', environment=None,
- workdir=None
+ workdir=None,
)
client.api.exec_start.assert_called_with(
- FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False
+ FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False,
+ demux=False,
)
def test_export(self):
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
index 6783279..fd894ab 100644
--- a/tests/unit/models_images_test.py
+++ b/tests/unit/models_images_test.py
@@ -1,6 +1,8 @@
+import unittest
+import warnings
+
from docker.constants import DEFAULT_DATA_CHUNK_SIZE
from docker.models.images import Image
-import unittest
from .fake_api import FAKE_IMAGE_ID
from .fake_api_client import make_fake_client
@@ -43,7 +45,9 @@ class ImageCollectionTest(unittest.TestCase):
def test_pull(self):
client = make_fake_client()
image = client.images.pull('test_image:latest')
- client.api.pull.assert_called_with('test_image', tag='latest')
+ client.api.pull.assert_called_with(
+ 'test_image', tag='latest', stream=True
+ )
client.api.inspect_image.assert_called_with('test_image:latest')
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
@@ -51,7 +55,9 @@ class ImageCollectionTest(unittest.TestCase):
def test_pull_multiple(self):
client = make_fake_client()
images = client.images.pull('test_image')
- client.api.pull.assert_called_with('test_image', tag=None)
+ client.api.pull.assert_called_with(
+ 'test_image', tag=None, stream=True
+ )
client.api.images.assert_called_with(
all=False, name='test_image', filters=None
)
@@ -61,6 +67,16 @@ class ImageCollectionTest(unittest.TestCase):
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
+ def test_pull_with_stream_param(self):
+ client = make_fake_client()
+ with warnings.catch_warnings(record=True) as w:
+ client.images.pull('test_image', stream=True)
+
+ assert len(w) == 1
+ assert str(w[0].message).startswith(
+ '`stream` is not a valid parameter'
+ )
+
def test_push(self):
client = make_fake_client()
client.images.push('foobar', insecure_registry=True)
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
index 247bb4a..a4ac50c 100644
--- a/tests/unit/models_services_test.py
+++ b/tests/unit/models_services_test.py
@@ -26,6 +26,8 @@ class CreateServiceKwargsTest(unittest.TestCase):
'mounts': [{'some': 'mounts'}],
'stop_grace_period': 5,
'constraints': ['foo=bar'],
+ 'preferences': ['bar=baz'],
+ 'platforms': [('x86_64', 'linux')],
})
task_template = kwargs.pop('task_template')
@@ -41,7 +43,11 @@ class CreateServiceKwargsTest(unittest.TestCase):
'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
'LogDriver', 'Networks'
])
- assert task_template['Placement'] == {'Constraints': ['foo=bar']}
+ assert task_template['Placement'] == {
+ 'Constraints': ['foo=bar'],
+ 'Preferences': ['bar=baz'],
+ 'Platforms': [{'Architecture': 'x86_64', 'OS': 'linux'}],
+ }
assert task_template['LogDriver'] == {
'Name': 'logdriver',
'Options': {'foo': 'bar'}
diff --git a/tests/unit/types_containers_test.py b/tests/unit/types_containers_test.py
new file mode 100644
index 0000000..b0ad0a7
--- /dev/null
+++ b/tests/unit/types_containers_test.py
@@ -0,0 +1,6 @@
+from docker.types.containers import ContainerConfig
+
+
+def test_uid_0_is_not_elided():
+ x = ContainerConfig(image='i', version='v', command='true', user=0)
+ assert x['User'] == '0'
diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py
index 50ba383..b0934f9 100644
--- a/tests/unit/utils_config_test.py
+++ b/tests/unit/utils_config_test.py
@@ -4,8 +4,8 @@ import shutil
import tempfile
import json
-from py.test import ensuretemp
-from pytest import mark
+from pytest import mark, fixture
+
from docker.utils import config
try:
@@ -15,25 +15,25 @@ except ImportError:
class FindConfigFileTest(unittest.TestCase):
- def tmpdir(self, name):
- tmpdir = ensuretemp(name)
- self.addCleanup(tmpdir.remove)
- return tmpdir
+
+ @fixture(autouse=True)
+ def tmpdir(self, tmpdir):
+ self.mkdir = tmpdir.mkdir
def test_find_config_fallback(self):
- tmpdir = self.tmpdir('test_find_config_fallback')
+ tmpdir = self.mkdir('test_find_config_fallback')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert config.find_config_file() is None
def test_find_config_from_explicit_path(self):
- tmpdir = self.tmpdir('test_find_config_from_explicit_path')
+ tmpdir = self.mkdir('test_find_config_from_explicit_path')
config_path = tmpdir.ensure('my-config-file.json')
assert config.find_config_file(str(config_path)) == str(config_path)
def test_find_config_from_environment(self):
- tmpdir = self.tmpdir('test_find_config_from_environment')
+ tmpdir = self.mkdir('test_find_config_from_environment')
config_path = tmpdir.ensure('config.json')
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
@@ -41,7 +41,7 @@ class FindConfigFileTest(unittest.TestCase):
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_posix(self):
- tmpdir = self.tmpdir('test_find_config_from_home_posix')
+ tmpdir = self.mkdir('test_find_config_from_home_posix')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
@@ -49,7 +49,7 @@ class FindConfigFileTest(unittest.TestCase):
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_legacy_name(self):
- tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
+ tmpdir = self.mkdir('test_find_config_from_home_legacy_name')
config_path = tmpdir.ensure('.dockercfg')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
@@ -57,7 +57,7 @@ class FindConfigFileTest(unittest.TestCase):
@mark.skipif("sys.platform != 'win32'")
def test_find_config_from_home_windows(self):
- tmpdir = self.tmpdir('test_find_config_from_home_windows')
+ tmpdir = self.mkdir('test_find_config_from_home_windows')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
diff --git a/tests/unit/utils_proxy_test.py b/tests/unit/utils_proxy_test.py
new file mode 100644
index 0000000..ff0e14b
--- /dev/null
+++ b/tests/unit/utils_proxy_test.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+import six
+
+from docker.utils.proxy import ProxyConfig
+
+HTTP = 'http://test:80'
+HTTPS = 'https://test:443'
+FTP = 'ftp://user:password@host:23'
+NO_PROXY = 'localhost,.localdomain'
+CONFIG = ProxyConfig(http=HTTP, https=HTTPS, ftp=FTP, no_proxy=NO_PROXY)
+ENV = {
+ 'http_proxy': HTTP,
+ 'HTTP_PROXY': HTTP,
+ 'https_proxy': HTTPS,
+ 'HTTPS_PROXY': HTTPS,
+ 'ftp_proxy': FTP,
+ 'FTP_PROXY': FTP,
+ 'no_proxy': NO_PROXY,
+ 'NO_PROXY': NO_PROXY,
+}
+
+
+class ProxyConfigTest(unittest.TestCase):
+
+ def test_from_dict(self):
+ config = ProxyConfig.from_dict({
+ 'httpProxy': HTTP,
+ 'httpsProxy': HTTPS,
+ 'ftpProxy': FTP,
+ 'noProxy': NO_PROXY
+ })
+ self.assertEqual(CONFIG.http, config.http)
+ self.assertEqual(CONFIG.https, config.https)
+ self.assertEqual(CONFIG.ftp, config.ftp)
+ self.assertEqual(CONFIG.no_proxy, config.no_proxy)
+
+ def test_new(self):
+ config = ProxyConfig()
+ self.assertIsNone(config.http)
+ self.assertIsNone(config.https)
+ self.assertIsNone(config.ftp)
+ self.assertIsNone(config.no_proxy)
+
+ config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d')
+ self.assertEqual(config.http, 'a')
+ self.assertEqual(config.https, 'b')
+ self.assertEqual(config.ftp, 'c')
+ self.assertEqual(config.no_proxy, 'd')
+
+ def test_truthiness(self):
+ assert not ProxyConfig()
+ assert ProxyConfig(http='non-zero')
+ assert ProxyConfig(https='non-zero')
+ assert ProxyConfig(ftp='non-zero')
+ assert ProxyConfig(no_proxy='non-zero')
+
+ def test_environment(self):
+ self.assertDictEqual(CONFIG.get_environment(), ENV)
+ empty = ProxyConfig()
+ self.assertDictEqual(empty.get_environment(), {})
+
+ def test_inject_proxy_environment(self):
+ # Proxy config is non null, env is None.
+ self.assertSetEqual(
+ set(CONFIG.inject_proxy_environment(None)),
+ set(['{}={}'.format(k, v) for k, v in six.iteritems(ENV)]))
+
+ # Proxy config is null, env is None.
+ self.assertIsNone(ProxyConfig().inject_proxy_environment(None))
+
+ env = ['FOO=BAR', 'BAR=BAZ']
+
+ # Proxy config is non null, env is non null
+ actual = CONFIG.inject_proxy_environment(env)
+ expected = ['{}={}'.format(k, v) for k, v in six.iteritems(ENV)] + env
+ # It's important that the first 8 variables are the ones from the proxy
+ # config, and the last 2 are the ones from the input environment
+ self.assertSetEqual(set(actual[:8]), set(expected[:8]))
+ self.assertSetEqual(set(actual[-2:]), set(expected[-2:]))
+
+ # Proxy config is null, env is non null
+ self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env)
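Note: the behaviour pinned down above is that get_environment() emits every configured proxy in both lower- and upper-case form, and inject_proxy_environment() prepends those 'KEY=value' pairs to the user-supplied list so the caller's entries stay last; ordering within the proxy block itself is not guaranteed, which is why the tests compare sets. A quick illustration with made-up values:

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig(http='http://proxy:3128', no_proxy='localhost')
    env = cfg.inject_proxy_environment(['FOO=BAR'])
    # Four proxy entries (http_proxy/HTTP_PROXY/no_proxy/NO_PROXY)
    # followed by the original user entry.
    assert len(env) == 5 and env[-1] == 'FOO=BAR'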
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 8880cfe..d9cb002 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -11,6 +11,7 @@ import unittest
from docker.api.client import APIClient
+from docker.constants import IS_WINDOWS_PLATFORM
from docker.errors import DockerException
from docker.utils import (
convert_filters, convert_volume_binds, decode_json_header, kwargs_from_env,
@@ -83,15 +84,17 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
- assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
assert 'ca.pem' in kwargs['tls'].ca_cert
assert 'cert.pem' in kwargs['tls'].cert[0]
assert 'key.pem' in kwargs['tls'].cert[1]
assert kwargs['tls'].assert_hostname is False
assert kwargs['tls'].verify
+
+ parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
try:
client = APIClient(**kwargs)
- assert kwargs['base_url'] == client.base_url
+ assert parsed_host == client.base_url
assert kwargs['tls'].ca_cert == client.verify
assert kwargs['tls'].cert == client.cert
except TypeError as e:
@@ -102,15 +105,16 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env(assert_hostname=True)
- assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
assert 'ca.pem' in kwargs['tls'].ca_cert
assert 'cert.pem' in kwargs['tls'].cert[0]
assert 'key.pem' in kwargs['tls'].cert[1]
assert kwargs['tls'].assert_hostname is True
assert kwargs['tls'].verify is False
+ parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
try:
client = APIClient(**kwargs)
- assert kwargs['base_url'] == client.base_url
+ assert parsed_host == client.base_url
assert kwargs['tls'].cert == client.cert
assert not kwargs['tls'].verify
except TypeError as e:
@@ -272,6 +276,11 @@ class ParseHostTest(unittest.TestCase):
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
+ 'ssh://:22/path',
+ 'tcp://netloc:3333/path?q=1',
+ 'unix:///sock/path#fragment',
+ 'https://netloc:3333/path;params',
+ 'ssh://:clearpassword@host:22',
]
valid_hosts = {
@@ -281,7 +290,7 @@ class ParseHostTest(unittest.TestCase):
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
- 'unix://': 'http+unix://var/run/docker.sock',
+ 'unix://': 'http+unix:///var/run/docker.sock',
'12.234.45.127:2375/docker/engine': (
'http://12.234.45.127:2375/docker/engine'
),
@@ -294,6 +303,9 @@ class ParseHostTest(unittest.TestCase):
'[fd12::82d1]:2375/docker/engine': (
'http://[fd12::82d1]:2375/docker/engine'
),
+ 'ssh://': 'ssh://127.0.0.1:22',
+ 'ssh://user@localhost:22': 'ssh://user@localhost:22',
+ 'ssh://user@remote': 'ssh://user@remote:22',
}
for host in invalid_hosts:
@@ -304,7 +316,7 @@ class ParseHostTest(unittest.TestCase):
assert parse_host(host, None) == expected
def test_parse_host_empty_value(self):
- unix_socket = 'http+unix://var/run/docker.sock'
+ unix_socket = 'http+unix:///var/run/docker.sock'
npipe = 'npipe:////./pipe/docker_engine'
for val in [None, '']:
@@ -449,8 +461,8 @@ class UtilsTest(unittest.TestCase):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
- ({'exited': 0}, '{"exited": [0]}'),
- ({'exited': [0, 1]}, '{"exited": [0, 1]}'),
+ ({'exited': 0}, '{"exited": ["0"]}'),
+ ({'exited': [0, 1]}, '{"exited": ["0", "1"]}'),
]
for filters, expected in tests:
@@ -483,9 +495,12 @@ class PortsTest(unittest.TestCase):
assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_protocol(self):
- internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
- assert internal_port == ["2000/udp"]
- assert external_port == [("127.0.0.1", "1000")]
+ for protocol in ['tcp', 'udp', 'sctp']:
+ internal_port, external_port = split_port(
+ "127.0.0.1:1000:2000/" + protocol
+ )
+ assert internal_port == ["2000/" + protocol]
+ assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
@@ -538,6 +553,10 @@ class PortsTest(unittest.TestCase):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000:2000:tcp")
+ def test_split_port_invalid_protocol(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000/ftp")
+
def test_non_matching_length_port_ranges(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000-1010:2000-2002/tcp")