-rw-r--r--  MANIFEST.in  1
-rw-r--r--  PKG-INFO  130
-rw-r--r--  README.md  4
-rw-r--r--  README.rst  94
-rw-r--r--  debian/changelog  55
-rw-r--r--  debian/clean  1
-rw-r--r--  debian/compat  1
-rw-r--r--  debian/control  28
-rw-r--r--  debian/copyright  2
-rw-r--r--  debian/patches/requirements.patch  81
-rwxr-xr-x  debian/rules  6
-rw-r--r--  debian/tests/control  6
-rw-r--r--  debian/upstream/metadata  1
-rw-r--r--  docker.egg-info/PKG-INFO  130
-rw-r--r--  docker.egg-info/SOURCES.txt  27
-rw-r--r--  docker.egg-info/requires.txt  22
-rw-r--r--  docker/__init__.py  3
-rw-r--r--  docker/api/build.py  69
-rw-r--r--  docker/api/client.py  167
-rw-r--r--  docker/api/config.py  17
-rw-r--r--  docker/api/container.py  192
-rw-r--r--  docker/api/daemon.py  30
-rw-r--r--  docker/api/exec_api.py  20
-rw-r--r--  docker/api/image.py  105
-rw-r--r--  docker/api/network.py  16
-rw-r--r--  docker/api/plugin.py  6
-rw-r--r--  docker/api/secret.py  9
-rw-r--r--  docker/api/service.py  66
-rw-r--r--  docker/api/swarm.py  107
-rw-r--r--  docker/api/volume.py  8
-rw-r--r--  docker/auth.py  416
-rw-r--r--  docker/client.py  28
-rw-r--r--  docker/constants.py  28
-rw-r--r--  docker/context/__init__.py  3
-rw-r--r--  docker/context/api.py  203
-rw-r--r--  docker/context/config.py  81
-rw-r--r--  docker/context/context.py  243
-rw-r--r--  docker/credentials/__init__.py  4
-rw-r--r--  docker/credentials/constants.py  4
-rw-r--r--  docker/credentials/errors.py  25
-rw-r--r--  docker/credentials/store.py  94
-rw-r--r--  docker/credentials/utils.py  38
-rw-r--r--  docker/errors.py  61
-rw-r--r--  docker/models/configs.py  2
-rw-r--r--  docker/models/containers.py  122
-rw-r--r--  docker/models/images.py  101
-rw-r--r--  docker/models/networks.py  5
-rw-r--r--  docker/models/plugins.py  7
-rw-r--r--  docker/models/resource.py  9
-rw-r--r--  docker/models/secrets.py  3
-rw-r--r--  docker/models/services.py  104
-rw-r--r--  docker/models/swarm.py  34
-rw-r--r--  docker/tls.py  16
-rw-r--r--  docker/transport/__init__.py  11
-rw-r--r--  docker/transport/basehttpadapter.py  8
-rw-r--r--  docker/transport/npipeconn.py  33
-rw-r--r--  docker/transport/npipesocket.py  24
-rw-r--r--  docker/transport/sshconn.py  255
-rw-r--r--  docker/transport/ssladapter.py  8
-rw-r--r--  docker/transport/unixconn.py  48
-rw-r--r--  docker/types/__init__.py  9
-rw-r--r--  docker/types/base.py  5
-rw-r--r--  docker/types/containers.py  245
-rw-r--r--  docker/types/daemon.py  18
-rw-r--r--  docker/types/healthcheck.py  14
-rw-r--r--  docker/types/networks.py  11
-rw-r--r--  docker/types/services.py  150
-rw-r--r--  docker/utils/build.py  30
-rw-r--r--  docker/utils/config.py  6
-rw-r--r--  docker/utils/decorators.py  2
-rw-r--r--  docker/utils/fnmatch.py  2
-rw-r--r--  docker/utils/json_stream.py  13
-rw-r--r--  docker/utils/ports.py  10
-rw-r--r--  docker/utils/proxy.py  73
-rw-r--r--  docker/utils/socket.py  107
-rw-r--r--  docker/utils/utils.py  211
-rw-r--r--  docker/version.py  4
-rw-r--r--  requirements.txt  16
-rw-r--r--  setup.py  55
-rw-r--r--  test-requirements.txt  11
-rw-r--r--  tests/gpg-keys/ownertrust  3
-rw-r--r--  tests/gpg-keys/secret  bin 0 -> 966 bytes
-rw-r--r--  tests/helpers.py  26
-rw-r--r--  tests/integration/api_build_test.py  78
-rw-r--r--  tests/integration/api_client_test.py  4
-rw-r--r--  tests/integration/api_config_test.py  17
-rw-r--r--  tests/integration/api_container_test.py  362
-rw-r--r--  tests/integration/api_exec_test.py  181
-rw-r--r--  tests/integration/api_healthcheck_test.py  10
-rw-r--r--  tests/integration/api_image_test.py  43
-rw-r--r--  tests/integration/api_network_test.py  41
-rw-r--r--  tests/integration/api_plugin_test.py  16
-rw-r--r--  tests/integration/api_secret_test.py  4
-rw-r--r--  tests/integration/api_service_test.py  233
-rw-r--r--  tests/integration/api_swarm_test.py  56
-rw-r--r--  tests/integration/base.py  84
-rw-r--r--  tests/integration/conftest.py  14
-rw-r--r--  tests/integration/context_api_test.py  59
-rw-r--r--  tests/integration/credentials/__init__.py  0
-rw-r--r--  tests/integration/credentials/store_test.py  86
-rw-r--r--  tests/integration/credentials/utils_test.py  22
-rw-r--r--  tests/integration/errors_test.py  4
-rw-r--r--  tests/integration/models_containers_test.py  102
-rw-r--r--  tests/integration/models_images_test.py  61
-rw-r--r--  tests/integration/models_services_test.py  38
-rw-r--r--  tests/integration/models_swarm_test.py  12
-rw-r--r--  tests/integration/regression_test.py  22
-rw-r--r--  tests/ssh/__init__.py  0
-rw-r--r--  tests/ssh/api_build_test.py  590
-rw-r--r--  tests/ssh/base.py  130
-rw-r--r--  tests/unit/api_build_test.py  82
-rw-r--r--  tests/unit/api_container_test.py  89
-rw-r--r--  tests/unit/api_exec_test.py  10
-rw-r--r--  tests/unit/api_image_test.py  25
-rw-r--r--  tests/unit/api_network_test.py  24
-rw-r--r--  tests/unit/api_test.py  232
-rw-r--r--  tests/unit/api_volume_test.py  4
-rw-r--r--  tests/unit/auth_test.py  458
-rw-r--r--  tests/unit/client_test.py  188
-rw-r--r--  tests/unit/context_test.py  49
-rw-r--r--  tests/unit/dockertypes_test.py  12
-rw-r--r--  tests/unit/errors_test.py  43
-rw-r--r--  tests/unit/fake_api.py  111
-rw-r--r--  tests/unit/fake_api_client.py  13
-rw-r--r--  tests/unit/models_containers_test.py  20
-rw-r--r--  tests/unit/models_images_test.py  47
-rw-r--r--  tests/unit/models_resources_test.py  2
-rw-r--r--  tests/unit/models_secrets_test.py  11
-rw-r--r--  tests/unit/models_services_test.py  18
-rw-r--r--  tests/unit/sshadapter_test.py  39
-rw-r--r--  tests/unit/ssladapter_test.py  38
-rw-r--r--  tests/unit/swarm_test.py  2
-rw-r--r--  tests/unit/types_containers_test.py  6
-rw-r--r--  tests/unit/utils_build_test.py  114
-rw-r--r--  tests/unit/utils_config_test.py  26
-rw-r--r--  tests/unit/utils_json_stream_test.py  12
-rw-r--r--  tests/unit/utils_proxy_test.py  81
-rw-r--r--  tests/unit/utils_test.py  107
138 files changed, 6412 insertions, 2163 deletions
diff --git a/MANIFEST.in b/MANIFEST.in
index 41b3fa9..2ba6e02 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -6,3 +6,4 @@ include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
recursive-include tests/integration/testdata *
+recursive-include tests/gpg-keys *
diff --git a/PKG-INFO b/PKG-INFO
index d8c4409..58deb13 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,105 +1,92 @@
-Metadata-Version: 1.1
+Metadata-Version: 2.1
Name: docker
-Version: 3.4.1
+Version: 5.0.3
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
-Author: Joffrey F
-Author-email: joffrey@docker.com
+Maintainer: Ulysses Souza
+Maintainer-email: ulysses.souza@docker.com
License: Apache License 2.0
-Description: Docker SDK for Python
- =====================
+Project-URL: Documentation, https://docker-py.readthedocs.io
+Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
+Description: # Docker SDK for Python
- |Build Status|
+ [![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
- A Python library for the Docker Engine API. It lets you do anything the
- ``docker`` command does, but from within Python apps – run containers,
- manage containers, manage Swarms, etc.
+ A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
- Installation
- ------------
+ ## Installation
- The latest stable version `is available on
- PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
- your ``requirements.txt`` file or install with pip:
-
- ::
+ The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker
- If you are intending to connect to a docker host via TLS, add
- ``docker[tls]`` to your requirements instead, or install with pip:
-
- ::
+ If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
pip install docker[tls]
- Usage
- -----
-
- Connect to Docker using the default socket or the configuration in your
- environment:
+ ## Usage
- .. code:: python
+ Connect to Docker using the default socket or the configuration in your environment:
- import docker
- client = docker.from_env()
+ ```python
+ import docker
+ client = docker.from_env()
+ ```
You can run containers:
- .. code:: python
-
- >>> client.containers.run("ubuntu:latest", "echo hello world")
- 'hello world\n'
+ ```python
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+ ```
You can run containers in the background:
- .. code:: python
-
- >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
- <Container '45e6d2de7c54'>
+ ```python
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+ ```
You can manage containers:
- .. code:: python
+ ```python
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
- >>> client.containers.list()
- [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+ >>> container = client.containers.get('45e6d2de7c54')
- >>> container = client.containers.get('45e6d2de7c54')
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
- >>> container.attrs['Config']['Image']
- "bfirsh/reticulate-splines"
+ >>> container.logs()
+ "Reticulating spline 1...\n"
- >>> container.logs()
- "Reticulating spline 1...\n"
-
- >>> container.stop()
+ >>> container.stop()
+ ```
You can stream logs:
- .. code:: python
-
- >>> for line in container.logs(stream=True):
- ... print line.strip()
- Reticulating spline 2...
- Reticulating spline 3...
- ...
+ ```python
+ >>> for line in container.logs(stream=True):
+ ... print(line.strip())
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+ ```
You can manage images:
- .. code:: python
-
- >>> client.images.pull('nginx')
- <Image 'nginx'>
-
- >>> client.images.list()
- [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```python
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
- `Read the full documentation <https://docker-py.readthedocs.io>`__ to
- see everything you can do.
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
- :target: https://travis-ci.org/docker/docker-py
+ [Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
@@ -107,12 +94,15 @@ Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+Provides-Extra: tls
+Provides-Extra: ssh
diff --git a/README.md b/README.md
index 3ff124d..4fc31f7 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# Docker SDK for Python
-[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
@@ -58,7 +58,7 @@ You can stream logs:
```python
>>> for line in container.logs(stream=True):
-... print line.strip()
+... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
diff --git a/README.rst b/README.rst
deleted file mode 100644
index d0117e6..0000000
--- a/README.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-Docker SDK for Python
-=====================
-
-|Build Status|
-
-A Python library for the Docker Engine API. It lets you do anything the
-``docker`` command does, but from within Python apps – run containers,
-manage containers, manage Swarms, etc.
-
-Installation
-------------
-
-The latest stable version `is available on
-PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
-your ``requirements.txt`` file or install with pip:
-
-::
-
- pip install docker
-
-If you are intending to connect to a docker host via TLS, add
-``docker[tls]`` to your requirements instead, or install with pip:
-
-::
-
- pip install docker[tls]
-
-Usage
------
-
-Connect to Docker using the default socket or the configuration in your
-environment:
-
-.. code:: python
-
- import docker
- client = docker.from_env()
-
-You can run containers:
-
-.. code:: python
-
- >>> client.containers.run("ubuntu:latest", "echo hello world")
- 'hello world\n'
-
-You can run containers in the background:
-
-.. code:: python
-
- >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
- <Container '45e6d2de7c54'>
-
-You can manage containers:
-
-.. code:: python
-
- >>> client.containers.list()
- [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
-
- >>> container = client.containers.get('45e6d2de7c54')
-
- >>> container.attrs['Config']['Image']
- "bfirsh/reticulate-splines"
-
- >>> container.logs()
- "Reticulating spline 1...\n"
-
- >>> container.stop()
-
-You can stream logs:
-
-.. code:: python
-
- >>> for line in container.logs(stream=True):
- ... print line.strip()
- Reticulating spline 2...
- Reticulating spline 3...
- ...
-
-You can manage images:
-
-.. code:: python
-
- >>> client.images.pull('nginx')
- <Image 'nginx'>
-
- >>> client.images.list()
- [<Image 'ubuntu'>, <Image 'nginx'>, ...]
-
-`Read the full documentation <https://docker-py.readthedocs.io>`__ to
-see everything you can do.
-
-.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
- :target: https://travis-ci.org/docker/docker-py
diff --git a/debian/changelog b/debian/changelog
index 915b183..68b859b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,58 @@
+python-docker (5.0.3-1) unstable; urgency=medium
+
+ [ Andrej Shadura ]
+ * New upstream release.
+ * Refresh the patch.
+ * Drop outdated dependency on python3-six.
+ * Build-depend on dh-sequence-python3 instead of --with python3.
+ * Clean eggs in debian/clean.
+
+ [ Anthony Fok ]
+ * Remove dependency on python3-dockerpycreds.
+
+ -- Andrej Shadura <andrewsh@debian.org> Tue, 12 Oct 2021 10:36:52 +0200
+
+python-docker (4.4.4-1) unstable; urgency=medium
+
+ * Upload to unstable.
+ * Add myself as uploader.
+
+ -- Thomas Goirand <zigo@debian.org> Mon, 16 Aug 2021 13:12:56 +0200
+
+python-docker (4.4.4-0.1) experimental; urgency=medium
+
+ * Non-maintainer upload.
+ * New upstream release.
+ * Rebased requirements.patch.
+
+ -- Thomas Goirand <zigo@debian.org> Sat, 27 Mar 2021 19:55:48 +0100
+
+python-docker (4.1.0-1.2) unstable; urgency=medium
+
+ * Uploading source-only.
+
+ -- Thomas Goirand <zigo@debian.org> Fri, 08 May 2020 10:36:18 +0200
+
+python-docker (4.1.0-1.1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+ * Add python3-distutils runtime depends. (Closes: #958577)
+
+ -- Thomas Goirand <zigo@debian.org> Fri, 08 May 2020 10:32:27 +0200
+
+python-docker (4.1.0-1) unstable; urgency=medium
+
+ * New upstream version 4.1.0
+ - Refresh patches
+ * Use secure copyright file specification URI.
+ * Set upstream metadata fields: Repository.
+ * Bump debhelper from old 10 to 12.
+ * Bump Standards Version
+ * Drop Python3-Version field.
+ Minimum required version is shipped in oldstable
+
+ -- Felipe Sateler <fsateler@debian.org> Fri, 22 Nov 2019 21:10:10 -0300
+
python-docker (3.4.1-4.1) unstable; urgency=medium
* Non-maintainer upload.
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 0000000..45149aa
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1 @@
+*.egg-info/*
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
index 28613a6..401b125 100644
--- a/debian/control
+++ b/debian/control
@@ -2,30 +2,28 @@ Source: python-docker
Section: python
Priority: optional
Maintainer: Docker Compose Team <team+docker-compose@tracker.debian.org>
-Uploaders: Jason Pleau <jason@jpleau.ca>,
- Felipe Sateler <fsateler@debian.org>
-Build-Depends: debhelper (>= 10),
- dh-python,
+Uploaders:
+ Jason Pleau <jason@jpleau.ca>,
+ Felipe Sateler <fsateler@debian.org>,
+ Thomas Goirand <zigo@debian.org>,
+Build-Depends:
+ debhelper-compat (= 12),
+ dh-sequence-python3,
python3-all,
+ python3-requests (>= 2.14.2~),
python3-setuptools,
-# setup.py - python3
- python3-requests (>= 2.11.1~),
- python3-six (>= 1.4.0~),
python3-websocket (>= 0.32.0~),
- python3-dockerpycreds (>= 0.2.2),
-# test-requirements.txt
-# python-mock (>= 1.0.1~),
-# python-coverage (>= 3.7.1~),
-# docker.io,
-Standards-Version: 4.2.1
+Standards-Version: 4.4.1
Homepage: https://github.com/docker/docker-py
Vcs-Git: https://salsa.debian.org/docker-compose-team/python-docker
Vcs-Browser: https://salsa.debian.org/docker-compose-team/python-docker
-X-Python3-Version: >= 3.5
Package: python3-docker
Architecture: all
-Depends: ${misc:Depends}, ${python3:Depends}
+Depends:
+ python3-distutils,
+ ${misc:Depends},
+ ${python3:Depends},
Description: Python 3 wrapper to access docker.io's control socket
This package contains oodles of routines that aid in controlling
docker.io over its socket control, the same way the docker.io
diff --git a/debian/copyright b/debian/copyright
index 75d9002..ff1cfa0 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -1,4 +1,4 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: docker
Upstream-Contact: Docker, Inc.
Source: https://github.com/docker/docker-py
diff --git a/debian/patches/requirements.patch b/debian/patches/requirements.patch
index e20baa4..e018d09 100644
--- a/debian/patches/requirements.patch
+++ b/debian/patches/requirements.patch
@@ -5,58 +5,79 @@ Subject: Unpin dependencies so newer versions satisfy them appropriately
Forwarded: https://github.com/dotcloud/docker-py/issues/101 (upstream has no interest)
Patch-Name: requirements.patch
+---
+ requirements.txt | 33 ++++++++++++++++-----------------
+ setup.py | 2 +-
+ test-requirements.txt | 14 +++++++-------
+ 3 files changed, 24 insertions(+), 25 deletions(-)
+diff --git a/requirements.txt b/requirements.txt
+index 26cbc6f..308ac48 100644
--- a/requirements.txt
+++ b/requirements.txt
-@@ -1,18 +1,18 @@
+@@ -1,17 +1,16 @@
-appdirs==1.4.3
-asn1crypto==0.22.0
-backports.ssl-match-hostname==3.5.0.1
--cffi==1.10.0
--cryptography==1.9
--docker-pycreds==0.3.0
+-cffi==1.14.4
+-cryptography==3.4.7
-enum34==1.1.6
-idna==2.5
-ipaddress==1.0.18
-packaging==16.8
+-paramiko==2.4.2
-pycparser==2.17
--pyOpenSSL==17.0.0
+-pyOpenSSL==18.0.0
-pyparsing==2.2.0
--pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
--pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
--requests==2.14.2
--six==1.10.0
--websocket-client==0.40.0
+-pywin32==301; sys_platform == 'win32'
+-requests==2.26.0
+-urllib3==1.26.5
+-websocket-client==0.56.0
+appdirs>=1.4.3
+asn1crypto>=0.22.0
+backports.ssl-match-hostname>=3.5.0.1
-+cffi>=1.10.0
-+cryptography>=1.9
-+docker-pycreds>=0.3.0
++cffi>=1.14.4
++cryptography>=3.2
+enum34>=1.1.6
+idna>=2.5
+ipaddress>=1.0.18
+packaging>=16.8
++paramiko>=2.4.2
+pycparser>=2.17
-+pyOpenSSL>=17.0.0
++pyOpenSSL>=18.0.0
+pyparsing>=2.2.0
-+pypiwin32>=219; sys_platform == 'win32' and python_version < '3.6'
-+pypiwin32>=220; sys_platform == 'win32' and python_version >= '3.6'
-+requests>=2.14.2
-+six>=1.10.0
-+websocket-client>=0.40.0
++requests>=2.25.0
++urllib3>=1.26.5
++websocket-client>=0.56.0
+diff --git a/setup.py b/setup.py
+index a966fea..5b9945d 100644
+--- a/setup.py
++++ b/setup.py
+@@ -26,7 +26,7 @@ extras_require = {
+ # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
+ # installing the extra dependencies, install the following instead:
+ # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
+- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],
++ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.2', 'idna>=2.0.0'],
+
+ # Only required when connecting using the ssh:// protocol
+ 'ssh': ['paramiko>=2.4.2'],
+diff --git a/test-requirements.txt b/test-requirements.txt
+index 40161bb..585e3fd 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
-@@ -1,6 +1,6 @@
--coverage==3.7.1
--flake8==3.4.1
+@@ -1,7 +1,7 @@
+-setuptools==54.1.1
+-coverage==4.5.2
+-flake8==3.6.0
-mock==1.0.1
--pytest==2.9.1
--pytest-cov==2.1.0
--pytest-timeout==1.2.1
-+coverage>=3.7.1
-+flake8>=3.4.1
+-pytest==4.3.1
+-pytest-cov==2.6.1
+-pytest-timeout==1.3.3
++setuptools>=54.1.1
++coverage>=4.5.2
++flake8>=3.6.0
+mock>=1.0.1
-+pytest>=2.9.1
-+pytest-cov>=2.1.0
-+pytest-timeout>=1.2.1
++pytest>=4.3.1
++pytest-cov>=2.6.1
++pytest-timeout>=1.3.3
diff --git a/debian/rules b/debian/rules
index 9ffd4ee..ae44baf 100755
--- a/debian/rules
+++ b/debian/rules
@@ -5,9 +5,5 @@ export PYBUILD_NAME=docker
# The pypi bundle includes the tests (https://github.com/docker/docker-py/issues/308), but they're really invasive and fail for reasons I haven't yet diagnosed.
export PYBUILD_DISABLE=test
-override_dh_auto_clean:
- dh_auto_clean
- rm -f docker_py.egg-info/SOURCES.txt
-
%:
- dh $@ --with python3 --buildsystem pybuild
+ dh $@ --buildsystem pybuild
diff --git a/debian/tests/control b/debian/tests/control
index 6163e1b..a94503d 100644
--- a/debian/tests/control
+++ b/debian/tests/control
@@ -1,3 +1,7 @@
Tests: integration
-Depends: docker.io, python3-mock, python3-pytest, @
+Depends:
+ docker.io,
+ python3-mock,
+ python3-pytest,
+ @,
Restrictions: isolation-machine needs-root
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
new file mode 100644
index 0000000..aebbc14
--- /dev/null
+++ b/debian/upstream/metadata
@@ -0,0 +1 @@
+Repository: https://github.com/docker/docker-py
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
index d8c4409..58deb13 100644
--- a/docker.egg-info/PKG-INFO
+++ b/docker.egg-info/PKG-INFO
@@ -1,105 +1,92 @@
-Metadata-Version: 1.1
+Metadata-Version: 2.1
Name: docker
-Version: 3.4.1
+Version: 5.0.3
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
-Author: Joffrey F
-Author-email: joffrey@docker.com
+Maintainer: Ulysses Souza
+Maintainer-email: ulysses.souza@docker.com
License: Apache License 2.0
-Description: Docker SDK for Python
- =====================
+Project-URL: Documentation, https://docker-py.readthedocs.io
+Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
+Description: # Docker SDK for Python
- |Build Status|
+ [![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
- A Python library for the Docker Engine API. It lets you do anything the
- ``docker`` command does, but from within Python apps – run containers,
- manage containers, manage Swarms, etc.
+ A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
- Installation
- ------------
+ ## Installation
- The latest stable version `is available on
- PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
- your ``requirements.txt`` file or install with pip:
-
- ::
+ The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker
- If you are intending to connect to a docker host via TLS, add
- ``docker[tls]`` to your requirements instead, or install with pip:
-
- ::
+ If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
pip install docker[tls]
- Usage
- -----
-
- Connect to Docker using the default socket or the configuration in your
- environment:
+ ## Usage
- .. code:: python
+ Connect to Docker using the default socket or the configuration in your environment:
- import docker
- client = docker.from_env()
+ ```python
+ import docker
+ client = docker.from_env()
+ ```
You can run containers:
- .. code:: python
-
- >>> client.containers.run("ubuntu:latest", "echo hello world")
- 'hello world\n'
+ ```python
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+ ```
You can run containers in the background:
- .. code:: python
-
- >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
- <Container '45e6d2de7c54'>
+ ```python
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+ ```
You can manage containers:
- .. code:: python
+ ```python
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
- >>> client.containers.list()
- [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+ >>> container = client.containers.get('45e6d2de7c54')
- >>> container = client.containers.get('45e6d2de7c54')
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
- >>> container.attrs['Config']['Image']
- "bfirsh/reticulate-splines"
+ >>> container.logs()
+ "Reticulating spline 1...\n"
- >>> container.logs()
- "Reticulating spline 1...\n"
-
- >>> container.stop()
+ >>> container.stop()
+ ```
You can stream logs:
- .. code:: python
-
- >>> for line in container.logs(stream=True):
- ... print line.strip()
- Reticulating spline 2...
- Reticulating spline 3...
- ...
+ ```python
+ >>> for line in container.logs(stream=True):
+ ... print(line.strip())
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+ ```
You can manage images:
- .. code:: python
-
- >>> client.images.pull('nginx')
- <Image 'nginx'>
-
- >>> client.images.list()
- [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```python
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
- `Read the full documentation <https://docker-py.readthedocs.io>`__ to
- see everything you can do.
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+ ```
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
- :target: https://travis-ci.org/docker/docker-py
+ [Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
@@ -107,12 +94,15 @@ Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+Provides-Extra: tls
+Provides-Extra: ssh
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
index 02d2f19..0e0320f 100644
--- a/docker.egg-info/SOURCES.txt
+++ b/docker.egg-info/SOURCES.txt
@@ -1,7 +1,6 @@
LICENSE
MANIFEST.in
README.md
-README.rst
requirements.txt
setup.cfg
setup.py
@@ -33,6 +32,15 @@ docker/api/secret.py
docker/api/service.py
docker/api/swarm.py
docker/api/volume.py
+docker/context/__init__.py
+docker/context/api.py
+docker/context/config.py
+docker/context/context.py
+docker/credentials/__init__.py
+docker/credentials/constants.py
+docker/credentials/errors.py
+docker/credentials/store.py
+docker/credentials/utils.py
docker/models/__init__.py
docker/models/configs.py
docker/models/containers.py
@@ -46,8 +54,10 @@ docker/models/services.py
docker/models/swarm.py
docker/models/volumes.py
docker/transport/__init__.py
+docker/transport/basehttpadapter.py
docker/transport/npipeconn.py
docker/transport/npipesocket.py
+docker/transport/sshconn.py
docker/transport/ssladapter.py
docker/transport/unixconn.py
docker/types/__init__.py
@@ -65,10 +75,13 @@ docker/utils/decorators.py
docker/utils/fnmatch.py
docker/utils/json_stream.py
docker/utils/ports.py
+docker/utils/proxy.py
docker/utils/socket.py
docker/utils/utils.py
tests/__init__.py
tests/helpers.py
+tests/gpg-keys/ownertrust
+tests/gpg-keys/secret
tests/integration/__init__.py
tests/integration/api_build_test.py
tests/integration/api_client_test.py
@@ -86,6 +99,7 @@ tests/integration/api_volume_test.py
tests/integration/base.py
tests/integration/client_test.py
tests/integration/conftest.py
+tests/integration/context_api_test.py
tests/integration/errors_test.py
tests/integration/models_containers_test.py
tests/integration/models_images_test.py
@@ -96,8 +110,14 @@ tests/integration/models_services_test.py
tests/integration/models_swarm_test.py
tests/integration/models_volumes_test.py
tests/integration/regression_test.py
+tests/integration/credentials/__init__.py
+tests/integration/credentials/store_test.py
+tests/integration/credentials/utils_test.py
tests/integration/testdata/dummy-plugin/config.json
tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
+tests/ssh/__init__.py
+tests/ssh/api_build_test.py
+tests/ssh/base.py
tests/unit/__init__.py
tests/unit/api_build_test.py
tests/unit/api_container_test.py
@@ -108,6 +128,7 @@ tests/unit/api_test.py
tests/unit/api_volume_test.py
tests/unit/auth_test.py
tests/unit/client_test.py
+tests/unit/context_test.py
tests/unit/dockertypes_test.py
tests/unit/errors_test.py
tests/unit/fake_api.py
@@ -117,12 +138,16 @@ tests/unit/models_containers_test.py
tests/unit/models_images_test.py
tests/unit/models_networks_test.py
tests/unit/models_resources_test.py
+tests/unit/models_secrets_test.py
tests/unit/models_services_test.py
+tests/unit/sshadapter_test.py
tests/unit/ssladapter_test.py
tests/unit/swarm_test.py
+tests/unit/types_containers_test.py
tests/unit/utils_build_test.py
tests/unit/utils_config_test.py
tests/unit/utils_json_stream_test.py
+tests/unit/utils_proxy_test.py
tests/unit/utils_test.py
tests/unit/testdata/certs/ca.pem
tests/unit/testdata/certs/cert.pem
diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt
index e0b0763..06622f8 100644
--- a/docker.egg-info/requires.txt
+++ b/docker.egg-info/requires.txt
@@ -1,21 +1,13 @@
-requests!=2.18.0,>=2.14.2
-six>=1.4.0
websocket-client>=0.32.0
-docker-pycreds>=0.3.0
-
-[:python_version < "3.3"]
-ipaddress>=1.0.16
-
-[:python_version < "3.5"]
-backports.ssl_match_hostname>=3.5
+requests!=2.18.0,>=2.14.2
-[:sys_platform == "win32" and python_version < "3.6"]
-pypiwin32==219
+[:sys_platform == "win32"]
+pywin32==227
-[:sys_platform == "win32" and python_version >= "3.6"]
-pypiwin32==220
+[ssh]
+paramiko>=2.4.2
[tls]
-pyOpenSSL>=0.14
-cryptography>=1.3.4
+pyOpenSSL>=17.5.0
+cryptography>=3.4.7
idna>=2.0.0
diff --git a/docker/__init__.py b/docker/__init__.py
index cf732e1..e5c1a8f 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,6 +1,9 @@
# flake8: noqa
from .api import APIClient
from .client import DockerClient, from_env
+from .context import Context
+from .context import ContextAPI
+from .tls import TLSConfig
from .version import version, version_info
__version__ = version
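
Review note: with the `docker/__init__.py` hunk above, `Context`, `ContextAPI`, and `TLSConfig` become importable from the top-level `docker` package. A minimal sketch of the new import surface (listing contexts assumes at least the built-in `default` context exists; the CA path is a placeholder):

```python
import docker

# TLSConfig can now be imported from the package root instead of docker.tls
tls_config = docker.TLSConfig(ca_cert='/path/to/ca.pem')

# ContextAPI mirrors `docker context ls`: enumerate contexts by name
for ctx in docker.ContextAPI.contexts():
    print(ctx.Name)
```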
diff --git a/docker/api/build.py b/docker/api/build.py
index 419255f..aac43c4 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -12,14 +12,15 @@ from .. import utils
log = logging.getLogger(__name__)
-class BuildApiMixin(object):
+class BuildApiMixin:
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
- squash=None, extra_hosts=None, platform=None, isolation=None):
+ squash=None, extra_hosts=None, platform=None, isolation=None,
+ use_config_proxy=True):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
@@ -103,6 +104,10 @@ class BuildApiMixin(object):
platform (str): Platform in the format ``os[/arch[/variant]]``
isolation (str): Isolation technology used during build.
Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
Returns:
A generator for the build output.
@@ -116,6 +121,7 @@ class BuildApiMixin(object):
remote = context = None
headers = {}
container_limits = container_limits or {}
+ buildargs = buildargs or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
if gzip and encoding is not None:
@@ -126,7 +132,7 @@ class BuildApiMixin(object):
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
- 'Invalid container_limits key {0}'.format(key)
+ f'Invalid container_limits key {key}'
)
if custom_context:
@@ -144,7 +150,7 @@ class BuildApiMixin(object):
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
- with open(dockerignore, 'r') as f:
+ with open(dockerignore) as f:
exclude = list(filter(
lambda x: x != '' and x[0] != '#',
[l.strip() for l in f.read().splitlines()]
@@ -168,6 +174,10 @@ class BuildApiMixin(object):
}
params.update(container_limits)
+ if use_config_proxy:
+ proxy_args = self._proxy_configs.get_environment()
+ for k, v in proxy_args.items():
+ buildargs.setdefault(k, v)
if buildargs:
params.update({'buildargs': json.dumps(buildargs)})
@@ -286,40 +296,32 @@ class BuildApiMixin(object):
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
- if not self._auth_configs:
+ if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
- self._auth_configs = auth.load_config()
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
- auth_data = {}
- if self._auth_configs.get('credsStore'):
- # Using a credentials store, we need to retrieve the
- # credentials for each registry listed in the config.json file
- # Matches CLI behavior: https://github.com/docker/docker/blob/
- # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
- # credentials/native_store.go#L68-L83
- for registry in self._auth_configs.get('auths', {}).keys():
- auth_data[registry] = auth.resolve_authconfig(
- self._auth_configs, registry,
- credstore_env=self.credstore_env,
- )
- else:
- auth_data = self._auth_configs.get('auths', {}).copy()
- # See https://github.com/docker/docker-py/issues/1683
- if auth.INDEX_NAME in auth_data:
- auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
+ auth_data = self._auth_configs.get_all_credentials()
+
+ # See https://github.com/docker/docker-py/issues/1683
+ if (auth.INDEX_URL not in auth_data and
+ auth.INDEX_NAME in auth_data):
+ auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
- 'Sending auth config ({0})'.format(
+ 'Sending auth config ({})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
- headers['X-Registry-Config'] = auth.encode_header(
- auth_data
- )
+ if auth_data:
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
else:
log.debug('No auth config found')
@@ -331,13 +333,20 @@ def process_dockerfile(dockerfile, path):
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
-
+ if constants.IS_WINDOWS_PLATFORM and path.startswith(
+ constants.WINDOWS_LONGPATH_PREFIX):
+ abs_dockerfile = '{}{}'.format(
+ constants.WINDOWS_LONGPATH_PREFIX,
+ os.path.normpath(
+ abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):]
+ )
+ )
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
- with open(abs_dockerfile, 'r') as df:
+ with open(abs_dockerfile) as df:
return (
- '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ f'.dockerfile.{random.getrandbits(160):x}',
df.read()
)
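
Review note on the `use_config_proxy` addition to `build()`: because the proxy environment is merged via `buildargs.setdefault()`, values from `~/.docker/config.json` never override build args passed explicitly. A hedged sketch (tag and proxy URL are placeholders):

```python
import docker

client = docker.from_env()

# HTTP_PROXY/HTTPS_PROXY/NO_PROXY from the client config are injected as
# build args, but an explicit buildarg such as this one still wins.
for chunk in client.api.build(
        path='.', tag='example/app:latest', decode=True,
        buildargs={'HTTP_PROXY': 'http://proxy.internal:3128'},
        use_config_proxy=True):
    print(chunk)
```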
diff --git a/docker/api/client.py b/docker/api/client.py
index 91da1c8..2667922 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -1,12 +1,25 @@
import json
import struct
+import urllib
from functools import partial
import requests
import requests.exceptions
-import six
import websocket
+from .. import auth
+from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
+ DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES)
+from ..errors import (DockerException, InvalidVersion, TLSParameterError,
+ create_api_error_from_http_exception)
+from ..tls import TLSConfig
+from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
+from ..utils import check_resource, config, update_headers, utils
+from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
@@ -19,23 +32,14 @@ from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
-from .. import auth
-from ..constants import (
- DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
- DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
- MINIMUM_DOCKER_API_VERSION
-)
-from ..errors import (
- DockerException, InvalidVersion, TLSParameterError,
- create_api_error_from_http_exception
-)
-from ..tls import TLSConfig
-from ..transport import SSLAdapter, UnixAdapter
-from ..utils import utils, check_resource, update_headers, config
-from ..utils.socket import frames_iter, socket_raw_iter
-from ..utils.json_stream import json_stream
+
+try:
+ from ..transport import NpipeHTTPAdapter
+except ImportError:
+ pass
+
try:
- from ..transport import NpipeAdapter
+ from ..transport import SSHHTTPAdapter
except ImportError:
pass
@@ -76,7 +80,7 @@ class APIClient(
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -85,6 +89,11 @@ class APIClient(
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
@@ -95,9 +104,10 @@ class APIClient(
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS,
- credstore_env=None):
- super(APIClient, self).__init__()
+ user_agent=DEFAULT_USER_AGENT, num_pools=None,
+ credstore_env=None, use_ssh_client=False,
+ max_pool_size=DEFAULT_MAX_POOL_SIZE):
+ super().__init__()
if tls and not base_url:
raise TLSParameterError(
@@ -109,17 +119,31 @@ class APIClient(
self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config()
+
+ proxy_config = self._general_configs.get('proxies', {})
+ try:
+ proxies = proxy_config[base_url]
+ except KeyError:
+ proxies = proxy_config.get('default', {})
+
+ self._proxy_configs = ProxyConfig.from_dict(proxies)
+
self._auth_configs = auth.load_config(
- config_dict=self._general_configs
+ config_dict=self._general_configs, credstore_env=credstore_env,
)
self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
+ # SSH has a different default for num_pools to all other adapters
+ num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
+ base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
+
if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(
- base_url, timeout, pool_connections=num_pools
+ self._custom_adapter = UnixHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
@@ -132,8 +156,9 @@ class APIClient(
'The npipe:// protocol is only supported on Windows'
)
try:
- self._custom_adapter = NpipeAdapter(
- base_url, timeout, pool_connections=num_pools
+ self._custom_adapter = NpipeHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
)
except NameError:
raise DockerException(
@@ -141,26 +166,40 @@ class APIClient(
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
+ elif base_url.startswith('ssh://'):
+ try:
+ self._custom_adapter = SSHHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size, shell_out=use_ssh_client
+ )
+ except NameError:
+ raise DockerException(
+ 'Install paramiko package to enable ssh:// support'
+ )
+ self.mount('http+docker://ssh', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://ssh'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
- self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self._custom_adapter = SSLHTTPAdapter(
+ pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
- if version is None:
- self._version = DEFAULT_DOCKER_API_VERSION
- elif isinstance(version, six.string_types):
- if version.lower() == 'auto':
- self._version = self._retrieve_server_version()
- else:
- self._version = version
+ if version is None or (isinstance(
+ version,
+ str
+ ) and version.lower() == 'auto'):
+ self._version = self._retrieve_server_version()
else:
+ self._version = version
+ if not isinstance(self._version, str):
raise DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
+ 'Version parameter must be a string or None. Found {}'.format(
type(version).__name__
)
)
@@ -180,7 +219,7 @@ class APIClient(
)
except Exception as e:
raise DockerException(
- 'Error while fetching server API version: {0}'.format(e)
+ f'Error while fetching server API version: {e}'
)
def _set_request_timeout(self, kwargs):
@@ -207,21 +246,21 @@ class APIClient(
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
- if not isinstance(arg, six.string_types):
+ if not isinstance(arg, str):
raise ValueError(
- 'Expected a string but found {0} ({1}) '
+ 'Expected a string but found {} ({}) '
'instead'.format(arg, type(arg))
)
- quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+ quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
+ return '{}/v{}{}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+ return f'{self.base_url}{pathfmt.format(*args)}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
@@ -245,7 +284,7 @@ class APIClient(
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
- for k, v in six.iteritems(data):
+ for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
@@ -279,12 +318,12 @@ class APIClient(
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
- elif six.PY3:
+ elif self.base_url.startswith('http+docker://ssh'):
+ sock = response.raw._fp.fp.channel
+ else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
@@ -302,8 +341,7 @@ class APIClient(
if response.raw._fp.chunked:
if decode:
- for chunk in json_stream(self._stream_helper(response, False)):
- yield chunk
+ yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
@@ -359,22 +397,31 @@ class APIClient(
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
- for out in response.iter_content(chunk_size, decode):
- yield out
- def _read_from_socket(self, response, stream, tty=False):
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ yield from response.iter_content(chunk_size, decode)
+
+ def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
- gen = None
- if tty is False:
- gen = frames_iter(socket)
+ gen = frames_iter(socket, tty)
+
+ if demux:
+ # The generator will output tuples (stdout, stderr)
+ gen = (demux_adaptor(*frame) for frame in gen)
else:
- gen = socket_raw_iter(socket)
+ # The generator will output strings
+ gen = (data for (_, data) in gen)
if stream:
return gen
else:
- return six.binary_type().join(gen)
+ # Wait for all the frames, concatenate them, and return the result
+ return consume_socket_output(gen, demux=demux)
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
@@ -420,7 +467,7 @@ class APIClient(
self._result(res, binary=True)
self._raise_for_status(res)
- sep = six.binary_type()
+ sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
@@ -434,7 +481,7 @@ class APIClient(
def get_adapter(self, url):
try:
- return super(APIClient, self).get_adapter(url)
+ return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
@@ -452,9 +499,11 @@ class APIClient(
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
- otherwise``$HOME/.dockercfg``)
+ otherwise ``$HOME/.dockercfg``)
Returns:
None
"""
- self._auth_configs = auth.load_config(dockercfg_path)
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
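
Review note on the new `APIClient` keywords above: `use_ssh_client` shells out to the local `ssh` binary instead of using paramiko, `max_pool_size` bounds the adapter's connection pool, and `version=None` now behaves like `version='auto'` (both trigger server-side detection). A sketch assuming a daemon reachable over `ssh://` (the hostname is a placeholder):

```python
import docker

client = docker.APIClient(
    base_url='ssh://user@docker-host.example',
    use_ssh_client=True,   # use the system ssh client; paramiko not required
    max_pool_size=10,      # new cap on pooled connections per adapter
    version=None,          # auto-detects the server API version
)
print(client.version()['ApiVersion'])
```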
diff --git a/docker/api/config.py b/docker/api/config.py
index 767bef2..88c367e 100644
--- a/docker/api/config.py
+++ b/docker/api/config.py
@@ -1,13 +1,11 @@
import base64
-import six
-
from .. import utils
-class ConfigApiMixin(object):
+class ConfigApiMixin:
@utils.minimum_version('1.30')
- def create_config(self, name, data, labels=None):
+ def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
@@ -15,6 +13,9 @@ class ConfigApiMixin(object):
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
+ templating (dict): dictionary containing the name of the
+ templating driver to be used expressed as
+ { name: <templating_driver_name>}
Returns (dict): ID of the newly created config
"""
@@ -22,12 +23,12 @@ class ConfigApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
- 'Labels': labels
+ 'Labels': labels,
+ 'Templating': templating
}
url = self._url('/configs/create')
@@ -42,7 +43,7 @@ class ConfigApiMixin(object):
Retrieve config metadata
Args:
- id (string): Full ID of the config to remove
+ id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
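
Review note: `create_config` now forwards a `templating` dict as the `Templating` field of the request body. A hedged example; `golang` as the driver name is an assumption about Swarm's templating driver, not something this hunk establishes, and the daemon must be in swarm mode:

```python
import docker

client = docker.from_env()

config_id = client.api.create_config(
    name='app-config',
    data=b'listen_addr = {{ .Service.Name }}',
    templating={'name': 'golang'},  # sent through as the Templating field
)
```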
diff --git a/docker/api/container.py b/docker/api/container.py
index d4f75f5..83fcd4f 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,19 +1,19 @@
-import six
from datetime import datetime
from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
-from ..types import (
- CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
- NetworkingConfig
-)
+from ..types import CancellableStream
+from ..types import ContainerConfig
+from ..types import EndpointConfig
+from ..types import HostConfig
+from ..types import NetworkingConfig
-class ContainerApiMixin(object):
+class ContainerApiMixin:
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
- stream=False, logs=False):
+ stream=False, logs=False, demux=False):
"""
Attach to a container.
@@ -28,11 +28,15 @@ class ContainerApiMixin(object):
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
+ demux (bool): Keep stdout and stderr separate.
Returns:
- By default, the container's output as a single string.
+ By default, the container's output as a single string (two if
+ ``demux=True``: one for stdout and one for stderr).
- If ``stream=True``, an iterator of output strings.
+ If ``stream=True``, an iterator of output strings. If
+ ``demux=True``, two iterators are returned: one for stdout and one
+ for stderr.
Raises:
:py:class:`docker.errors.APIError`
@@ -54,8 +58,7 @@ class ContainerApiMixin(object):
response = self._post(u, headers=headers, params=params, stream=True)
output = self._read_from_socket(
- response, stream, self._check_is_tty(container)
- )
+ response, stream, self._check_is_tty(container), demux=demux)
if stream:
return CancellableStream(output, response)
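
Review note: per the updated `attach` docstring above, `demux=True` yields `(stdout, stderr)` pairs instead of interleaved output, and one element of each pair may be `None`. A sketch (the container id is a placeholder):

```python
import docker

client = docker.from_env()

for stdout, stderr in client.api.attach(
        '45e6d2de7c54', stream=True, logs=True, demux=True):
    if stdout:
        print('OUT:', stdout.decode(), end='')
    if stderr:
        print('ERR:', stderr.decode(), end='')
```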
@@ -169,7 +172,8 @@ class ContainerApiMixin(object):
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
@@ -218,7 +222,8 @@ class ContainerApiMixin(object):
working_dir=None, domainname=None, host_config=None,
mac_address=None, labels=None, stop_signal=None,
networking_config=None, healthcheck=None,
- stop_timeout=None, runtime=None):
+ stop_timeout=None, runtime=None,
+ use_config_proxy=True):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
@@ -237,9 +242,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[1111, 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
1111: 4567,
2222: None
})
@@ -251,22 +256,22 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
Or without host port assignment:
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config:
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None
})
)
@@ -276,7 +281,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [1234, 4567]
})
@@ -284,7 +289,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [
('192.168.0.100', 1234),
('192.168.0.101', 1234)
@@ -300,9 +305,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds={
+ host_config=client.api.create_host_config(binds={
'/home/user1/': {
'bind': '/mnt/vol2',
'mode': 'rw',
@@ -319,9 +324,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds=[
+ host_config=client.api.create_host_config(binds=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
])
@@ -339,15 +344,15 @@ class ContainerApiMixin(object):
.. code-block:: python
- networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config(
+ networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config(
ipv4_address='172.28.0.124',
aliases=['foo', 'bar'],
links=['container2']
)
})
- ctnr = docker_client.create_container(
+ ctnr = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -387,6 +392,10 @@ class ContainerApiMixin(object):
runtime (str): Runtime to use with this container.
healthcheck (dict): Specify a test to perform to check that the
container is healthy.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being created.
Returns:
A dictionary with an image 'Id' key and a 'Warnings' key.
@@ -397,9 +406,17 @@ class ContainerApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
+ if use_config_proxy:
+ environment = self._proxy_configs.inject_proxy_environment(
+ environment
+ ) or None
+
config = self.create_container_config(
image, command, hostname, user, detach, stdin_open, tty,
ports, environment, volumes,
@@ -461,34 +478,33 @@ class ContainerApiMixin(object):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
dns_search (:py:class:`list`): DNS search domains.
- extra_hosts (dict): Addtional hostnames to resolve inside the
+ extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
init (bool): Run an init inside the container that forwards
signals and reaps processes
- init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
- isolation (str): Isolation technology to use. Default: `None`.
- links (dict or list of tuples): Either a dictionary mapping name
- to alias or as a list of ``(name, alias)`` tuples.
- log_config (dict): Logging configuration, as a dictionary with
- keys:
-
- - ``type`` The logging driver name.
- - ``config`` A dictionary of configuration for the logging
- driver.
-
+ isolation (str): Isolation technology to use. Default: ``None``.
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration
lxc_conf (dict): LXC config.
mem_limit (float or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
                specified without a units character, bytes are assumed as an
                intended unit.
+            mem_reservation (float or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -500,11 +516,13 @@ class ContainerApiMixin(object):
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
- on the bridge network.
+ the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+ This mode is incompatible with ``port_bindings``.
+
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
@@ -513,7 +531,8 @@ class ContainerApiMixin(object):
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
port_bindings (dict): See :py:meth:`create_container`
- for more information.
+ for more information.
+                Incompatible with ``host`` in ``network_mode``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@@ -543,10 +562,12 @@ class ContainerApiMixin(object):
}
ulimits (:py:class:`list`): Ulimits to set inside the container,
- as a list of dicts.
+ as a list of :py:class:`docker.types.Ulimit` instances.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
runtime (str): Runtime to use with this container.
@@ -558,7 +579,7 @@ class ContainerApiMixin(object):
Example:
- >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ >>> client.api.create_host_config(privileged=True, cap_drop=['MKNOD'],
volumes_from=['nostalgic_newton'])
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
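
The new ``device_requests`` parameter documented above mirrors
``docker run --gpus``. A hedged sketch, assuming a daemon with a
GPU-capable runtime:

.. code-block:: python

    import docker
    from docker.types import DeviceRequest

    client = docker.from_env()
    host_config = client.api.create_host_config(
        device_requests=[
            # count=-1 requests every available GPU
            DeviceRequest(count=-1, capabilities=[['gpu']])
        ]
    )
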
@@ -589,11 +610,11 @@ class ContainerApiMixin(object):
Example:
- >>> docker_client.create_network('network1')
- >>> networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config()
+ >>> client.api.create_network('network1')
+ >>> networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config()
})
- >>> container = docker_client.create_container(
+ >>> container = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -609,24 +630,27 @@ class ContainerApiMixin(object):
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
- links (:py:class:`list`): A list of links for this endpoint.
- Containers declared in this list will be linked to this
- container. Defaults to ``None``.
+ links (dict): Mapping of links for this endpoint using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to this
+ container using the provided alias. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
Returns:
(dict) An endpoint config.
Example:
- >>> endpoint_config = client.create_endpoint_config(
+ >>> endpoint_config = client.api.create_endpoint_config(
aliases=['web', 'app'],
- links=['app_db'],
+ links={'app_db': 'db', 'another': None},
ipv4_address='132.65.0.123'
)
@@ -676,7 +700,8 @@ class ContainerApiMixin(object):
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container')
- def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
@@ -687,6 +712,8 @@ class ContainerApiMixin(object):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -695,12 +722,29 @@ class ContainerApiMixin(object):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+
+ Example:
+
+ >>> c = docker.APIClient()
+ >>> f = open('./sh_bin.tar', 'wb')
+            >>> bits, stat = c.get_archive(container, '/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
"""
params = {
'path': path
}
+ headers = {
+ "Accept-Encoding": "gzip, deflate"
+ } if encode_stream else {
+ "Accept-Encoding": "identity"
+ }
url = self._url('/containers/{0}/archive', container)
- res = self._get(url, params=params, stream=True)
+ res = self._get(url, params=params, stream=True, headers=headers)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
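
With ``encode_stream=True`` the request merely advertises gzip support via
``Accept-Encoding``; whether the daemon compresses is up to the server. A
small sketch (the container name is illustrative):

.. code-block:: python

    import docker

    client = docker.from_env()
    bits, stat = client.api.get_archive(
        'my-container', '/etc/hostname', encode_stream=True
    )
    with open('hostname.tar', 'wb') as f:
        for chunk in bits:
            f.write(chunk)
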
@@ -744,7 +788,7 @@ class ContainerApiMixin(object):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
- if not isinstance(signal, six.string_types):
+ if not isinstance(signal, str):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
@@ -763,16 +807,16 @@ class ContainerApiMixin(object):
Args:
container (str): The container to get logs from
- stdout (bool): Get ``STDOUT``
- stderr (bool): Get ``STDERR``
- stream (bool): Stream the response
- timestamps (bool): Show timestamps
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
- follow (bool): Follow log output
+ follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
@@ -870,7 +914,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- >>> cli.port('7174d6347063', 80)
+ >>> client.api.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
"""
res = self._get(self._url("/containers/{0}/json", container))
@@ -888,9 +932,10 @@ class ContainerApiMixin(object):
if '/' in private_port:
return port_settings.get(private_port)
- h_ports = port_settings.get(private_port + '/tcp')
- if h_ports is None:
- h_ports = port_settings.get(private_port + '/udp')
+ for protocol in ['tcp', 'udp', 'sctp']:
+ h_ports = port_settings.get(private_port + '/' + protocol)
+ if h_ports:
+ break
return h_ports
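
The extended loop means SCTP port bindings resolve as well; hypothetically:

.. code-block:: python

    import docker

    client = docker.from_env()
    # a container that published 9260/sctp used to return None here
    print(client.api.port('my-container', 9260))
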
@@ -1048,10 +1093,10 @@ class ContainerApiMixin(object):
Example:
- >>> container = cli.create_container(
+ >>> container = client.api.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
- >>> cli.start(container=container.get('Id'))
+ >>> client.api.start(container=container.get('Id'))
"""
if args or kwargs:
raise errors.DeprecatedMethod(
@@ -1072,7 +1117,8 @@ class ContainerApiMixin(object):
Args:
container (str): The container to stream statistics from
decode (bool): If set to true, stream will be decoded into dicts
- on the fly. False by default.
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
@@ -1086,6 +1132,10 @@ class ContainerApiMixin(object):
return self._stream_helper(self._get(url, stream=True),
decode=decode)
else:
+ if decode:
+ raise errors.InvalidArgument(
+ "decode is only available in conjunction with stream=True"
+ )
return self._result(self._get(url, params={'stream': False}),
json=True)
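
A sketch of both modes; ``decode=True`` without ``stream=True`` now raises
``docker.errors.InvalidArgument`` instead of being silently ignored:

.. code-block:: python

    import docker

    client = docker.from_env()
    snapshot = client.api.stats('my-container', stream=False)  # single dict
    for entry in client.api.stats('my-container', stream=True, decode=True):
        print(entry['read'])  # decoded dicts arrive as they are produced
        break
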
@@ -1170,8 +1220,8 @@ class ContainerApiMixin(object):
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
- mem_limit (int or str): Memory limit
- mem_reservation (int or str): Memory soft limit
+ mem_limit (float or str): Memory limit
+ mem_reservation (float or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 76a94cf..a857213 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils
-class DaemonApiMixin(object):
+class DaemonApiMixin:
@utils.minimum_version('1.25')
def df(self):
"""
@@ -42,8 +42,8 @@ class DaemonApiMixin(object):
Example:
- >>> for event in client.events()
- ... print event
+            >>> for event in client.events(decode=True):
+ ... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
@@ -54,7 +54,7 @@ class DaemonApiMixin(object):
>>> events = client.events()
>>> for event in events:
- ... print event
+ ... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
@@ -109,7 +109,7 @@ class DaemonApiMixin(object):
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
- otherwise``$HOME/.dockercfg``)
+ otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
@@ -124,13 +124,15 @@ class DaemonApiMixin(object):
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
- self._auth_configs = auth.load_config(dockercfg_path)
- elif not self._auth_configs:
- self._auth_configs = auth.load_config()
-
- authcfg = auth.resolve_authconfig(
- self._auth_configs, registry, credstore_env=self.credstore_env,
- )
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+ elif not self._auth_configs or self._auth_configs.is_empty:
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
@@ -146,9 +148,7 @@ class DaemonApiMixin(object):
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- if 'auths' not in self._auth_configs:
- self._auth_configs['auths'] = {}
- self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
+ self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
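
Credential-store lookups now flow through the ``AuthConfig`` object built by
``auth.load_config``. A hedged login sketch against a hypothetical private
registry, with placeholder credentials:

.. code-block:: python

    import docker

    client = docker.from_env()
    resp = client.api.login(
        username='alice', password='s3cret',
        registry='registry.example.com',
    )
    print(resp.get('Status'))
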
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 986d87f..496308a 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -1,10 +1,8 @@
-import six
-
from .. import errors
from .. import utils
-class ExecApiMixin(object):
+class ExecApiMixin:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
@@ -45,7 +43,7 @@ class ExecApiMixin(object):
'Setting environment for exec is not supported in API < 1.25'
)
- if isinstance(cmd, six.string_types):
+ if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
@@ -118,7 +116,7 @@ class ExecApiMixin(object):
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
- socket=False):
+ socket=False, demux=False):
"""
Start a previously set up exec instance.
@@ -130,11 +128,15 @@ class ExecApiMixin(object):
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations.
+ demux (bool): Return stdout and stderr separately
Returns:
- (generator or str): If ``stream=True``, a generator yielding
- response chunks. If ``socket=True``, a socket object for the
- connection. A string containing response data otherwise.
+
+ (generator or str or tuple): If ``stream=True``, a generator
+ yielding response chunks. If ``socket=True``, a socket object for
+ the connection. A string containing response data otherwise. If
+ ``demux=True``, a tuple with two elements of type byte: stdout and
+ stderr.
Raises:
:py:class:`docker.errors.APIError`
@@ -162,4 +164,4 @@ class ExecApiMixin(object):
return self._result(res)
if socket:
return self._get_raw_response_socket(res)
- return self._read_from_socket(res, stream, tty)
+ return self._read_from_socket(res, stream, tty=tty, demux=demux)
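
With ``demux=True`` the multiplexed stream is split into its two channels. A
minimal sketch (the container name is illustrative):

.. code-block:: python

    import docker

    client = docker.from_env()
    exec_id = client.api.exec_create(
        'my-container', ['sh', '-c', 'echo out; echo err >&2']
    )
    stdout, stderr = client.api.exec_start(exec_id, demux=True)
    print(stdout)  # b'out\n'
    print(stderr)  # b'err\n'
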
diff --git a/docker/api/image.py b/docker/api/image.py
index 5f05d88..772d889 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,15 +1,13 @@
import logging
import os
-import six
-
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
-class ImageApiMixin(object):
+class ImageApiMixin:
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@@ -31,8 +29,8 @@ class ImageApiMixin(object):
Example:
- >>> image = cli.get_image("busybox:latest")
- >>> f = open('/tmp/busybox-latest.tar', 'w')
+ >>> image = client.api.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
@@ -70,7 +68,8 @@ class ImageApiMixin(object):
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- - ``label`` (str): format either ``key`` or ``key=value``
+            - ``label`` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
@@ -80,10 +79,18 @@ class ImageApiMixin(object):
If the server returns an error.
"""
params = {
- 'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
+ if name:
+ if utils.version_lt(self._version, '1.25'):
+ # only use "filter" on API 1.24 and under, as it is deprecated
+ params['filter'] = name
+ else:
+ if filters:
+ filters['reference'] = name
+ else:
+ filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
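
Callers keep the same interface; on API 1.25 and later the ``name`` argument
is silently translated into a ``reference`` filter instead of the deprecated
``filter`` query parameter:

.. code-block:: python

    import docker

    client = docker.from_env()
    # sent as filters={'reference': 'busybox'} against modern daemons
    images = client.api.images(name='busybox')
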
@@ -121,7 +128,7 @@ class ImageApiMixin(object):
params = _import_image_params(
repository, tag, image,
- src=(src if isinstance(src, six.string_types) else None),
+ src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
@@ -130,7 +137,7 @@ class ImageApiMixin(object):
return self._result(
self._post(u, data=None, params=params)
)
- elif isinstance(src, six.string_types): # from file path
+ elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
@@ -247,12 +254,15 @@ class ImageApiMixin(object):
@utils.minimum_version('1.30')
@utils.check_resource('image')
- def inspect_distribution(self, image):
+ def inspect_distribution(self, image, auth_config=None):
"""
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
@@ -261,9 +271,21 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ registry, _ = auth.resolve_repository_name(image)
+
+ headers = {}
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ url = self._url("/distribution/{0}/json", image)
return self._result(
- self._get(self._url("/distribution/{0}/json", image)), True
+ self._get(url, headers=headers), True
)
def load_image(self, data, quiet=None):
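
A hedged sketch of the per-call override; the registry, image, and
credentials below are placeholders:

.. code-block:: python

    import docker

    client = docker.from_env()
    dist = client.api.inspect_distribution(
        'registry.example.com/team/app:1.0',
        auth_config={'username': 'alice', 'password': 's3cret'},
    )
    print(dist['Descriptor']['digest'])
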
@@ -327,21 +349,24 @@ class ImageApiMixin(object):
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
- decode=False, platform=None):
+ decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
- tag (str): The tag to pull
- stream (bool): Stream the output as a generator
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
+ is set to ``latest``.
+ stream (bool): Stream the output as a generator. Make sure to
+ consume the generator, otherwise pull might get cancelled.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
+            all_tags (bool): Pull all image tags; the ``tag`` parameter is
+ ignored.
Returns:
(generator or str): The output
@@ -352,8 +377,8 @@ class ImageApiMixin(object):
Example:
- >>> for line in cli.pull('busybox', stream=True):
- ... print(json.dumps(json.loads(line), indent=4))
+ >>> for line in client.api.pull('busybox', stream=True, decode=True):
+ ... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
@@ -366,8 +391,12 @@ class ImageApiMixin(object):
}
"""
- if not tag:
- repository, tag = utils.parse_repository_tag(repository)
+ repository, image_tag = utils.parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
+
+ if all_tags:
+ tag = None
+
registry, repo_name = auth.resolve_repository_name(repository)
params = {
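
The tag now resolves in a fixed order (explicit ``tag`` argument, then the
tag embedded in the reference, then ``latest``), unless ``all_tags`` clears
it:

.. code-block:: python

    import docker

    client = docker.from_env()
    client.api.pull('busybox')                 # pulls busybox:latest
    client.api.pull('busybox:musl')            # tag taken from the reference
    client.api.pull('busybox', all_tags=True)  # tag ignored, all tags pulled
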
@@ -413,10 +442,9 @@ class ImageApiMixin(object):
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
@@ -428,12 +456,12 @@ class ImageApiMixin(object):
If the server returns an error.
Example:
- >>> for line in cli.push('yourname/app', stream=True):
- ... print line
- {"status":"Pushing repository yourname/app (1 tags)"}
- {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
- {"status":"Image already pushed, skipping","progressDetail":{},
- "id":"511136ea3c5a"}
+ >>> for line in client.api.push('yourname/app', stream=True, decode=True):
+ ... print(line)
+ {'status': 'Pushing repository yourname/app (1 tags)'}
+            {'status': 'Pushing', 'progressDetail': {}, 'id': '511136ea3c5a'}
+            {'status': 'Image already pushed, skipping', 'progressDetail': {},
+ 'id': '511136ea3c5a'}
...
"""
@@ -479,13 +507,14 @@ class ImageApiMixin(object):
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
- def search(self, term):
+ def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
+ limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
@@ -494,8 +523,12 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ params = {'term': term}
+ if limit is not None:
+ params['limit'] = limit
+
return self._result(
- self._get(self._url("/images/search"), params={'term': term}),
+ self._get(self._url("/images/search"), params=params),
True
)
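
``limit`` is forwarded as a query parameter only when given, so existing
callers are unaffected:

.. code-block:: python

    import docker

    client = docker.from_env()
    for result in client.api.search('busybox', limit=5):
        print(result['name'], result['star_count'])
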
@@ -519,7 +552,7 @@ class ImageApiMixin(object):
Example:
- >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
@@ -536,7 +569,7 @@ class ImageApiMixin(object):
def is_file(src):
try:
return (
- isinstance(src, six.string_types) and
+ isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
diff --git a/docker/api/network.py b/docker/api/network.py
index 57ed8d3..e95c5fc 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -4,10 +4,10 @@ from ..utils import version_lt
from .. import utils
-class NetworkApiMixin(object):
+class NetworkApiMixin:
def networks(self, names=None, ids=None, filters=None):
"""
- List networks. Similar to the ``docker networks ls`` command.
+ List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
@@ -15,7 +15,8 @@ class NetworkApiMixin(object):
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
+ such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
@@ -74,7 +75,7 @@ class NetworkApiMixin(object):
Example:
A network using the bridge driver:
- >>> client.create_network("network1", driver="bridge")
+ >>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
@@ -89,7 +90,7 @@ class NetworkApiMixin(object):
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
- >>> docker_client.create_network("network1", driver="bridge",
+ >>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
@@ -215,7 +216,7 @@ class NetworkApiMixin(object):
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
- link_local_ips=None):
+ link_local_ips=None, driver_opt=None):
"""
Connect a container to a network.
@@ -239,7 +240,8 @@ class NetworkApiMixin(object):
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
- ipv6_address=ipv6_address, link_local_ips=link_local_ips
+ ipv6_address=ipv6_address, link_local_ips=link_local_ips,
+ driver_opt=driver_opt
),
}
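
The new ``driver_opt`` mapping is passed straight through to the endpoint
config; a sketch with a made-up option key:

.. code-block:: python

    import docker

    client = docker.from_env()
    client.api.connect_container_to_network(
        'my-container', 'network1',
        driver_opt={'com.example.network.opt': 'value'},
    )
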
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
index f6c0b13..57110f1 100644
--- a/docker/api/plugin.py
+++ b/docker/api/plugin.py
@@ -1,9 +1,7 @@
-import six
-
from .. import auth, utils
-class PluginApiMixin(object):
+class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
@@ -21,7 +19,7 @@ class PluginApiMixin(object):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
- data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
diff --git a/docker/api/secret.py b/docker/api/secret.py
index fa4c2ab..cd440b9 100644
--- a/docker/api/secret.py
+++ b/docker/api/secret.py
@@ -1,12 +1,10 @@
import base64
-import six
-
from .. import errors
from .. import utils
-class SecretApiMixin(object):
+class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
@@ -25,8 +23,7 @@ class SecretApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
@@ -53,7 +50,7 @@ class SecretApiMixin(object):
Retrieve secret metadata
Args:
- id (string): Full ID of the secret to remove
+ id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
diff --git a/docker/api/service.py b/docker/api/service.py
index 03b0ca6..371f541 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -2,7 +2,8 @@ from .. import auth, errors, utils
from ..types import ServiceMode
-def _check_api_features(version, task_template, update_config, endpoint_spec):
+def _check_api_features(version, task_template, update_config, endpoint_spec,
+ rollback_config):
def raise_version_error(param, min_version):
raise errors.InvalidVersion(
@@ -18,10 +19,24 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if 'Monitor' in update_config:
raise_version_error('UpdateConfig.monitor', '1.25')
+ if utils.version_lt(version, '1.28'):
+ if update_config.get('FailureAction') == 'rollback':
+ raise_version_error(
+ 'UpdateConfig.failure_action rollback', '1.28'
+ )
+
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('UpdateConfig.order', '1.29')
+ if rollback_config is not None:
+ if utils.version_lt(version, '1.28'):
+ raise_version_error('rollback_config', '1.28')
+
+ if utils.version_lt(version, '1.29'):
+ if 'Order' in update_config:
+ raise_version_error('RollbackConfig.order', '1.29')
+
if endpoint_spec is not None:
if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
@@ -30,7 +45,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
- raise_version_error('force_update', '1.25')
+ raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
@@ -73,6 +88,10 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35')
+ if utils.version_lt(version, '1.38'):
+ if container_spec.get('Init') is not None:
+ raise_version_error('ContainerSpec.init', '1.38')
+
if task_template.get('Resources'):
if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'):
@@ -94,12 +113,12 @@ def _merge_task_template(current, override):
return merged
-class ServiceApiMixin(object):
+class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
- endpoint_spec=None
+ endpoint_spec=None, rollback_config=None
):
"""
Create a service.
@@ -114,8 +133,11 @@ class ServiceApiMixin(object):
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
- networks (:py:class:`list`): List of network names or IDs to attach
- the service to. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
@@ -129,7 +151,8 @@ class ServiceApiMixin(object):
"""
_check_api_features(
- self._version, task_template, update_config, endpoint_spec
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
)
url = self._url('/services/create')
@@ -160,6 +183,9 @@ class ServiceApiMixin(object):
if update_config is not None:
data['UpdateConfig'] = update_config
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+
return self._result(
self._post_json(url, data=data, headers=headers), True
)
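
A hedged sketch of a service created with the new rollback strategy (needs
API >= 1.28; all names are illustrative):

.. code-block:: python

    import docker
    from docker.types import ContainerSpec, RollbackConfig, TaskTemplate

    client = docker.from_env()
    task = TaskTemplate(ContainerSpec(image='busybox', command='sleep 300'))
    client.api.create_service(
        task, name='sleeper',
        rollback_config=RollbackConfig(parallelism=1, order='start-first'),
    )
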
@@ -176,7 +202,8 @@ class ServiceApiMixin(object):
into the service inspect output.
Returns:
- ``True`` if successful.
+ (dict): A dictionary of the server-side representation of the
+ service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
@@ -336,7 +363,8 @@ class ServiceApiMixin(object):
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
- endpoint_spec=None, fetch_current_spec=False):
+ endpoint_spec=None, fetch_current_spec=False,
+ rollback_config=None):
"""
Update a service.
@@ -354,15 +382,18 @@ class ServiceApiMixin(object):
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
- networks (:py:class:`list`): List of network names or IDs to attach
- the service to. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False``
Returns:
- ``True`` if successful.
+ A dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
@@ -370,7 +401,8 @@ class ServiceApiMixin(object):
"""
_check_api_features(
- self._version, task_template, update_config, endpoint_spec
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
)
if fetch_current_spec:
@@ -416,6 +448,11 @@ class ServiceApiMixin(object):
else:
data['UpdateConfig'] = current.get('UpdateConfig')
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+ else:
+ data['RollbackConfig'] = current.get('RollbackConfig')
+
if networks is not None:
converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'):
@@ -440,5 +477,4 @@ class ServiceApiMixin(object):
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
- self._raise_for_status(resp)
- return True
+ return self._result(resp, json=True)
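
Callers that used to test for ``True`` should now read the response body:

.. code-block:: python

    import docker

    client = docker.from_env()
    spec = client.api.inspect_service('sleeper')
    result = client.api.update_service(
        'sleeper', version=spec['Version']['Index'], fetch_current_spec=True
    )
    print(result.get('Warnings'))
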
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 04595da..db40fdd 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,5 +1,6 @@
import logging
-from six.moves import http_client
+import http.client as http_client
+from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
from .. import utils
@@ -7,7 +8,7 @@ from .. import utils
log = logging.getLogger(__name__)
-class SwarmApiMixin(object):
+class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
@@ -57,10 +58,10 @@ class SwarmApiMixin(object):
Example:
- >>> spec = client.create_swarm_spec(
+ >>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
- >>> client.init_swarm(
+ >>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
@@ -82,7 +83,9 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, swarm_spec=None):
+ force_new_cluster=False, swarm_spec=None,
+ default_addr_pool=None, subnet_size=None,
+ data_path_addr=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
@@ -107,9 +110,17 @@ class SwarmApiMixin(object):
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
+ default_addr_pool (list of strings): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
- ``True`` if successful.
+ (str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
@@ -119,15 +130,44 @@ class SwarmApiMixin(object):
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
+
+ if default_addr_pool is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Address pool is only available for API version >= 1.39'
+ )
+ # subnet_size becomes 0 if not set with default_addr_pool
+ if subnet_size is None:
+ subnet_size = DEFAULT_SWARM_SUBNET_SIZE
+
+ if subnet_size is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Subnet size is only available for API version >= 1.39'
+ )
+ # subnet_size is ignored if set without default_addr_pool
+ if default_addr_pool is None:
+ default_addr_pool = DEFAULT_SWARM_ADDR_POOL
+
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
+ 'DefaultAddrPool': default_addr_pool,
+ 'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
response = self._post_json(url, data=data)
- self._raise_for_status(response)
- return True
+ return self._result(response, json=True)
@utils.minimum_version('1.24')
def inspect_swarm(self):
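
A hedged sketch of the new knobs (the address-pool options need API >= 1.39,
``data_path_addr`` needs >= 1.30; the addresses are illustrative):

.. code-block:: python

    import docker

    client = docker.from_env()
    node_id = client.api.init_swarm(
        advertise_addr='eth0',
        default_addr_pool=['10.20.0.0/16'],
        subnet_size=26,
        data_path_addr='eth1',
    )
    print(node_id)  # the endpoint now returns the created node's ID
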
@@ -165,7 +205,7 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
- advertise_addr=None):
+ advertise_addr=None, data_path_addr=None):
"""
Make this Engine join a swarm that has already been created.
@@ -176,7 +216,7 @@ class SwarmApiMixin(object):
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
- Endpoint (VTEP). Default: ``None``
+                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
@@ -184,6 +224,8 @@ class SwarmApiMixin(object):
the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None``
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
``True`` if the request went through.
@@ -193,11 +235,20 @@ class SwarmApiMixin(object):
If the server returns an error.
"""
data = {
- "RemoteAddrs": remote_addrs,
- "ListenAddr": listen_addr,
- "JoinToken": join_token,
- "AdvertiseAddr": advertise_addr,
+ 'RemoteAddrs': remote_addrs,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token,
+ 'AdvertiseAddr': advertise_addr,
}
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
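
Joining through a dedicated data-path interface looks like this (the token
and addresses are placeholders):

.. code-block:: python

    import docker

    client = docker.from_env()
    client.api.join_swarm(
        remote_addrs=['192.168.1.10:2377'],
        join_token='SWMTKN-1-placeholder',
        data_path_addr='eth1',
    )
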
@@ -303,8 +354,8 @@ class SwarmApiMixin(object):
Example:
- >>> key = client.get_unlock_key()
- >>> client.unlock_node(key)
+ >>> key = client.api.get_unlock_key()
+            >>> client.api.unlock_swarm(key)
"""
if isinstance(key, dict):
@@ -345,7 +396,7 @@ class SwarmApiMixin(object):
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
- >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""
@@ -355,8 +406,10 @@ class SwarmApiMixin(object):
return True
@utils.minimum_version('1.24')
- def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
- rotate_manager_token=False):
+ def update_swarm(self, version, swarm_spec=None,
+ rotate_worker_token=False,
+ rotate_manager_token=False,
+ rotate_manager_unlock_key=False):
"""
Update the Swarm's configuration
@@ -370,6 +423,8 @@ class SwarmApiMixin(object):
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
Returns:
``True`` if the request went through.
@@ -378,12 +433,20 @@ class SwarmApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
-
url = self._url('/swarm/update')
- response = self._post_json(url, data=swarm_spec, params={
+ params = {
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
- })
+ }
+ if rotate_manager_unlock_key:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Rotate manager unlock key '
+ 'is only available for API version >= 1.25'
+ )
+ params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
+
+ response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response)
return True
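
Rotating the unlock key rides on the same endpoint (API >= 1.25):

.. code-block:: python

    import docker

    client = docker.from_env()
    version = client.api.inspect_swarm()['Version']['Index']
    client.api.update_swarm(version=version, rotate_manager_unlock_key=True)
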
diff --git a/docker/api/volume.py b/docker/api/volume.py
index 900a608..86b0018 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -2,7 +2,7 @@ from .. import errors
from .. import utils
-class VolumeApiMixin(object):
+class VolumeApiMixin:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
@@ -21,7 +21,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.volumes()
+ >>> client.api.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
@@ -56,7 +56,7 @@ class VolumeApiMixin(object):
Example:
- >>> volume = cli.create_volume(name='foobar', driver='local',
+ >>> volume = client.api.create_volume(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
>>> print(volume)
@@ -104,7 +104,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.inspect_volume('foobar')
+ >>> client.api.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
diff --git a/docker/auth.py b/docker/auth.py
index 9635f93..4fa798f 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -2,14 +2,12 @@ import base64
import json
import logging
-import dockerpycreds
-import six
-
+from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -18,13 +16,13 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
- 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
- 'Invalid index name ({0}). Cannot begin or end with a'
+ 'Invalid index name ({}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
@@ -39,11 +37,11 @@ def resolve_index_name(index_name):
def get_config_header(client, registry):
log.debug('Looking for auth config')
- if not client._auth_configs:
+ if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
- client._auth_configs = load_config()
+ client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
@@ -70,81 +68,258 @@ def split_repo_name(repo_name):
def get_credential_store(authconfig, registry):
- if not registry or registry == INDEX_NAME:
- registry = 'https://index.docker.io/v1/'
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig)
+ return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+ def __init__(self, dct, credstore_env=None):
+ if 'auths' not in dct:
+ dct['auths'] = {}
+ self.update(dct)
+ self._credstore_env = credstore_env
+ self._stores = {}
+
+ @classmethod
+ def parse_auth(cls, entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ conf = {}
+ for registry, entry in entries.items():
+ if not isinstance(entry, dict):
+ log.debug(
+ 'Config entry for key {} is not auth config'.format(
+ registry
+ )
+ )
+ # We sometimes fall back to parsing the whole config as if it
+ # was the auth config by itself, for legacy purposes. In that
+ # case, we fail silently and return an empty conf if any of the
+ # keys is not formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {}'.format(
+ registry
+ )
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug(
+ 'Found an IdentityToken entry for registry {}'.format(
+ registry
+ )
+ )
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug(
+ 'Auth data for {} is absent. Client might be using a '
+ 'credentials store instead.'.format(registry)
+ )
+ conf[registry] = {}
+ continue
- return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
- 'credsStore'
- )
+ username, password = decode_auth(entry['auth'])
+ log.debug(
+ 'Found entry (registry={}, username={})'
+ .format(repr(registry), repr(username))
+ )
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+ @classmethod
+ def load_config(cls, config_path, config_dict, credstore_env=None):
+ """
+ Loads authentication data from a Docker configuration file in the given
+        root directory, or from the given path if config_path is passed.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment
+ variable > ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return cls({}, credstore_env)
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return cls(_load_legacy_config(config_file), credstore_env)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': cls.parse_auth(
+ config_dict.pop('auths'), raise_on_error=True
+ )
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return cls(res, credstore_env)
-def resolve_authconfig(authconfig, registry=None, credstore_env=None):
- """
- Returns the authentication data from the given auth configuration for a
- specific registry. As with the Docker client, legacy entries in the config
- with full URLs are stripped down to hostnames before checking for a match.
- Returns None if no match was found.
- """
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret "
+ "as auth-only file"
+ )
+ return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
- if 'credHelpers' in authconfig or 'credsStore' in authconfig:
- store_name = get_credential_store(authconfig, registry)
- if store_name is not None:
- log.debug(
- 'Using credentials store "{0}"'.format(store_name)
- )
- cfg = _resolve_authconfig_credstore(
- authconfig, registry, store_name, env=credstore_env
- )
- if cfg is not None:
- return cfg
- log.debug('No entry in credstore - fetching from auth dict')
+ @property
+ def auths(self):
+ return self.get('auths', {})
- # Default to the public index server
- registry = resolve_index_name(registry) if registry else INDEX_NAME
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ @property
+ def creds_store(self):
+ return self.get('credsStore', None)
- authdict = authconfig.get('auths', {})
- if registry in authdict:
- log.debug("Found {0}".format(repr(registry)))
- return authdict[registry]
+ @property
+ def cred_helpers(self):
+ return self.get('credHelpers', {})
- for key, conf in six.iteritems(authdict):
- if resolve_index_name(key) == registry:
- log.debug("Found {0}".format(repr(key)))
- return conf
+ @property
+ def is_empty(self):
+ return (
+ not self.auths and not self.creds_store and not self.cred_helpers
+ )
- log.debug("No entry found")
- return None
+ def resolve_authconfig(self, registry=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the
+ config with full URLs are stripped down to hostnames before checking
+ for a match. Returns None if no match was found.
+ """
+
+ if self.creds_store or self.cred_helpers:
+ store_name = self.get_credential_store(registry)
+ if store_name is not None:
+ log.debug(
+ f'Using credentials store "{store_name}"'
+ )
+ cfg = self._resolve_authconfig_credstore(registry, store_name)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug(f"Looking for auth entry for {repr(registry)}")
-def _resolve_authconfig_credstore(authconfig, registry, credstore_name,
- env=None):
- if not registry or registry == INDEX_NAME:
- # The ecosystem is a little schizophrenic with index.docker.io VS
- # docker.io - in that case, it seems the full URL is necessary.
- registry = INDEX_URL
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
- store = dockerpycreds.Store(credstore_name, environment=env)
- try:
- data = store.get(registry)
- res = {
- 'ServerAddress': registry,
- }
- if data['Username'] == TOKEN_USERNAME:
- res['IdentityToken'] = data['Secret']
- else:
- res.update({
- 'Username': data['Username'],
- 'Password': data['Secret'],
- })
- return res
- except dockerpycreds.CredentialsNotFound as e:
- log.debug('No entry found')
+ if registry in self.auths:
+ log.debug(f"Found {repr(registry)}")
+ return self.auths[registry]
+
+ for key, conf in self.auths.items():
+ if resolve_index_name(key) == registry:
+ log.debug(f"Found {repr(key)}")
+ return conf
+
+ log.debug("No entry found")
return None
- except dockerpycreds.StoreError as e:
- raise errors.DockerException(
- 'Credentials store error: {0}'.format(repr(e))
- )
+
+ def _resolve_authconfig_credstore(self, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+ # The ecosystem is a little schizophrenic with index.docker.io VS
+ # docker.io - in that case, it seems the full URL is necessary.
+ registry = INDEX_URL
+ log.debug(f"Looking for auth entry for {repr(registry)}")
+ store = self._get_store_instance(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except credentials.CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except credentials.StoreError as e:
+ raise errors.DockerException(
+ f'Credentials store error: {repr(e)}'
+ )
+
+ def _get_store_instance(self, name):
+ if name not in self._stores:
+ self._stores[name] = credentials.Store(
+ name, environment=self._credstore_env
+ )
+ return self._stores[name]
+
+ def get_credential_store(self, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = INDEX_URL
+
+ return self.cred_helpers.get(registry) or self.creds_store
+
+ def get_all_credentials(self):
+ auth_data = self.auths.copy()
+ if self.creds_store:
+ # Retrieve all credentials from the default store
+ store = self._get_store_instance(self.creds_store)
+ for k in store.list().keys():
+ auth_data[k] = self._resolve_authconfig_credstore(
+ k, self.creds_store
+ )
+ auth_data[convert_to_hostname(k)] = auth_data[k]
+
+ # credHelpers entries take priority over all others
+ for reg, store_name in self.cred_helpers.items():
+ auth_data[reg] = self._resolve_authconfig_credstore(
+ reg, store_name
+ )
+ auth_data[convert_to_hostname(reg)] = auth_data[reg]
+
+ return auth_data
+
+ def add_auth(self, reg, data):
+ self['auths'][reg] = data
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig, credstore_env)
+ return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url):
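
The module-level helpers now simply delegate to the class. A hedged sketch
of using ``AuthConfig`` directly (the registry name is a placeholder):

.. code-block:: python

    from docker import auth

    cfg = auth.load_config()  # returns an AuthConfig instance
    if not cfg.is_empty:
        entry = cfg.resolve_authconfig('registry.example.com')
        # merges plain auths, the default credstore and credHelpers entries
        everything = cfg.get_all_credentials()
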
@@ -152,7 +327,7 @@ def convert_to_hostname(url):
def decode_auth(auth):
- if isinstance(auth, six.string_types):
+ if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
@@ -177,100 +352,11 @@ def parse_auth(entries, raise_on_error=False):
Authentication registry.
"""
- conf = {}
- for registry, entry in six.iteritems(entries):
- if not isinstance(entry, dict):
- log.debug(
- 'Config entry for key {0} is not auth config'.format(registry)
- )
- # We sometimes fall back to parsing the whole config as if it was
- # the auth config by itself, for legacy purposes. In that case, we
- # fail silently and return an empty conf if any of the keys is not
- # formatted properly.
- if raise_on_error:
- raise errors.InvalidConfigFile(
- 'Invalid configuration for registry {0}'.format(registry)
- )
- return {}
- if 'identitytoken' in entry:
- log.debug('Found an IdentityToken entry for registry {0}'.format(
- registry
- ))
- conf[registry] = {
- 'IdentityToken': entry['identitytoken']
- }
- continue # Other values are irrelevant if we have a token, skip.
-
- if 'auth' not in entry:
- # Starting with engine v1.11 (API 1.23), an empty dictionary is
- # a valid value in the auths config.
- # https://github.com/docker/compose/issues/3265
- log.debug(
- 'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'.format(registry)
- )
- conf[registry] = {}
- continue
-
- username, password = decode_auth(entry['auth'])
- log.debug(
- 'Found entry (registry={0}, username={1})'
- .format(repr(registry), repr(username))
- )
-
- conf[registry] = {
- 'username': username,
- 'password': password,
- 'email': entry.get('email'),
- 'serveraddress': registry,
- }
- return conf
-
-
-def load_config(config_path=None, config_dict=None):
- """
- Loads authentication data from a Docker configuration file in the given
- root directory or if config_path is passed use given path.
- Lookup priority:
- explicit config_path parameter > DOCKER_CONFIG environment variable >
- ~/.docker/config.json > ~/.dockercfg
- """
+ return AuthConfig.parse_auth(entries, raise_on_error)
- if not config_dict:
- config_file = config.find_config_file(config_path)
- if not config_file:
- return {}
- try:
- with open(config_file) as f:
- config_dict = json.load(f)
- except (IOError, KeyError, ValueError) as e:
- # Likely missing new Docker config file or it's in an
- # unknown format, continue to attempt to read old location
- # and format.
- log.debug(e)
- return _load_legacy_config(config_file)
-
- res = {}
- if config_dict.get('auths'):
- log.debug("Found 'auths' section")
- res.update({
- 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
- })
- if config_dict.get('credsStore'):
- log.debug("Found 'credsStore' section")
- res.update({'credsStore': config_dict.pop('credsStore')})
- if config_dict.get('credHelpers'):
- log.debug("Found 'credHelpers' section")
- res.update({'credHelpers': config_dict.pop('credHelpers')})
- if res:
- return res
-
- log.debug(
- "Couldn't find auth-related section ; attempting to interpret"
- "as auth-only file"
- )
- return {'auths': parse_auth(config_dict)}
+def load_config(config_path=None, config_dict=None, credstore_env=None):
+ return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
diff --git a/docker/client.py b/docker/client.py
index 8d4a52b..4dbd846 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,5 +1,5 @@
from .api.client import APIClient
-from .constants import DEFAULT_TIMEOUT_SECONDS
+from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
@@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
-class DockerClient(object):
+class DockerClient:
"""
A client for communicating with a Docker server.
@@ -26,7 +26,7 @@ class DockerClient(object):
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -35,6 +35,11 @@ class DockerClient(object):
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@@ -62,14 +67,19 @@ class DockerClient(object):
Args:
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is
+ made via shelling out to the ssh client. Ensure the ssh
+ client is installed and configured on the host.
Example:
@@ -80,9 +90,15 @@ class DockerClient(object):
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
+ max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
+ use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
- timeout=timeout, version=version, **kwargs_from_env(**kwargs)
+ timeout=timeout,
+ max_pool_size=max_pool_size,
+ version=version,
+ use_ssh_client=use_ssh_client,
+ **kwargs_from_env(**kwargs)
)
# Resources
@@ -196,7 +212,7 @@ class DockerClient(object):
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
- s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ s = [f"'DockerClient' object has no attribute '{name}'"]
# If a user calls a method on APIClient, they probably want client.api
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
diff --git a/docker/constants.py b/docker/constants.py
index 7565a76..d5bfc35 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.35'
+DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@@ -9,12 +9,36 @@ CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
-DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
+DEFAULT_USER_AGENT = f"docker-sdk-python/{version}"
DEFAULT_NUM_POOLS = 25
+
+# The OpenSSH server default value for MaxSessions is 10 which means we can
+# use up to 9, leaving the final session for the underlying SSH connection.
+# For more details see: https://github.com/docker/docker-py/issues/2246
+DEFAULT_NUM_POOLS_SSH = 9
+
+DEFAULT_MAX_POOL_SIZE = 10
+
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
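The new ``BYTE_UNITS`` table backs human-readable size parsing; a rough sketch of the intended use (the SDK's real helper for this is ``docker.utils.parse_bytes``):

    from docker.constants import BYTE_UNITS

    def parse_size(spec):
        # '128m' -> 134217728; bare integers pass through unchanged
        unit = spec[-1].lower()
        if unit in BYTE_UNITS:
            return int(spec[:-1]) * BYTE_UNITS[unit]
        return int(spec)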
diff --git a/docker/context/__init__.py b/docker/context/__init__.py
new file mode 100644
index 0000000..0a6707f
--- /dev/null
+++ b/docker/context/__init__.py
@@ -0,0 +1,3 @@
+# flake8: noqa
+from .context import Context
+from .api import ContextAPI
diff --git a/docker/context/api.py b/docker/context/api.py
new file mode 100644
index 0000000..380e8c4
--- /dev/null
+++ b/docker/context/api.py
@@ -0,0 +1,203 @@
+import json
+import os
+
+from docker import errors
+from docker.context.config import get_meta_dir
+from docker.context.config import METAFILE
+from docker.context.config import get_current_context_name
+from docker.context.config import write_context_name_to_docker_config
+from docker.context import Context
+
+
+class ContextAPI:
+ """Context API.
+ Contains methods for context management:
+ create, list, remove, get, inspect.
+ """
+ DEFAULT_CONTEXT = Context("default", "swarm")
+
+ @classmethod
+ def create_context(
+ cls, name, orchestrator=None, host=None, tls_cfg=None,
+ default_namespace=None, skip_tls_verify=False):
+ """Creates a new context.
+ Returns:
+ (Context): a Context object.
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextAlreadyExists`
+ If a context with the name already exists.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.create_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ '"default" is a reserved context name')
+ ctx = Context.load_context(name)
+ if ctx:
+ raise errors.ContextAlreadyExists(name)
+ endpoint = "docker"
+ if orchestrator and orchestrator != "swarm":
+ endpoint = orchestrator
+ ctx = Context(name, orchestrator)
+ ctx.set_endpoint(
+ endpoint, host, tls_cfg,
+ skip_tls_verify=skip_tls_verify,
+ def_namespace=default_namespace)
+ ctx.save()
+ return ctx
+
+ @classmethod
+ def get_context(cls, name=None):
+ """Retrieves a context object.
+ Args:
+ name (str): The name of the context
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.get_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ name = get_current_context_name()
+ if name == "default":
+ return cls.DEFAULT_CONTEXT
+ return Context.load_context(name)
+
+ @classmethod
+ def contexts(cls):
+ """Context list.
+ Returns:
+ (list of Context): A list of context objects.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ names = []
+ for dirname, dirnames, fnames in os.walk(get_meta_dir()):
+ for filename in fnames + dirnames:
+ if filename == METAFILE:
+ try:
+ with open(os.path.join(dirname, filename)) as f:
+ data = json.load(f)
+ names.append(data["Name"])
+ except Exception as e:
+ raise errors.ContextException(
+ "Failed to load metafile {}: {}".format(
+ filename, e))
+
+ contexts = [cls.DEFAULT_CONTEXT]
+ for name in names:
+ contexts.append(Context.load_context(name))
+ return contexts
+
+ @classmethod
+ def get_current_context(cls):
+ """Get current context.
+ Returns:
+ (Context): current context object.
+ """
+ return cls.get_context()
+
+ @classmethod
+ def set_current_context(cls, name="default"):
+ ctx = cls.get_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ err = write_context_name_to_docker_config(name)
+ if err:
+ raise errors.ContextException(
+ f'Failed to set current context: {err}')
+
+ @classmethod
+ def remove_context(cls, name):
+ """Remove a context. Similar to the ``docker context rm`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ContextAPI.remove_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ 'context "default" cannot be removed')
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+ if name == get_current_context_name():
+ write_context_name_to_docker_config(None)
+ ctx.remove()
+
+ @classmethod
+ def inspect_context(cls, name="default"):
+ """Remove a context. Similar to the ``docker context inspect`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.inspect_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ return cls.DEFAULT_CONTEXT()
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ return ctx()
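A round-trip sketch of the new context API (host and listing output are illustrative; contexts are persisted under the Docker config directory):

    >>> from docker.context import ContextAPI
    >>> ctx = ContextAPI.create_context('remote', host='ssh://user@box')
    >>> [c.Name for c in ContextAPI.contexts()]
    ['default', 'remote']
    >>> ContextAPI.remove_context('remote')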
diff --git a/docker/context/config.py b/docker/context/config.py
new file mode 100644
index 0000000..d761aef
--- /dev/null
+++ b/docker/context/config.py
@@ -0,0 +1,81 @@
+import os
+import json
+import hashlib
+
+from docker import utils
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.constants import DEFAULT_UNIX_SOCKET
+from docker.utils.config import find_config_file
+
+METAFILE = "meta.json"
+
+
+def get_current_context_name():
+ name = "default"
+ docker_cfg_path = find_config_file()
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ name = json.load(f).get("currentContext", "default")
+ except Exception:
+ return "default"
+ return name
+
+
+def write_context_name_to_docker_config(name=None):
+ if name == 'default':
+ name = None
+ docker_cfg_path = find_config_file()
+ config = {}
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ config = json.load(f)
+ except Exception as e:
+ return e
+ current_context = config.get("currentContext", None)
+ if current_context and not name:
+ del config["currentContext"]
+ elif name:
+ config["currentContext"] = name
+ else:
+ return
+ try:
+ with open(docker_cfg_path, "w") as f:
+ json.dump(config, f, indent=4)
+ except Exception as e:
+ return e
+
+
+def get_context_id(name):
+ return hashlib.sha256(name.encode('utf-8')).hexdigest()
+
+
+def get_context_dir():
+ return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
+
+
+def get_meta_dir(name=None):
+ meta_dir = os.path.join(get_context_dir(), "meta")
+ if name:
+ return os.path.join(meta_dir, get_context_id(name))
+ return meta_dir
+
+
+def get_meta_file(name):
+ return os.path.join(get_meta_dir(name), METAFILE)
+
+
+def get_tls_dir(name=None, endpoint=""):
+ context_dir = get_context_dir()
+ if name:
+ return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
+ return os.path.join(context_dir, "tls")
+
+
+def get_context_host(path=None, tls=False):
+ host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
+ if host == DEFAULT_UNIX_SOCKET:
+ # drop the "http+" prefix from the default docker socket url
+ return host[len("http+"):]
+ return host
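These helpers define the on-disk layout; a sketch of what they resolve to (paths assume the default ``~/.docker`` config directory, digest truncated for display):

    >>> from docker.context.config import get_context_id, get_meta_file
    >>> get_context_id('remote')  # sha256 of the name, used as a dir name
    'a9bd84...'
    >>> get_meta_file('remote')
    '/home/user/.docker/contexts/meta/a9bd84.../meta.json'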
diff --git a/docker/context/context.py b/docker/context/context.py
new file mode 100644
index 0000000..dbaa01c
--- /dev/null
+++ b/docker/context/context.py
@@ -0,0 +1,243 @@
+import os
+import json
+from shutil import copyfile, rmtree
+from docker.tls import TLSConfig
+from docker.errors import ContextException
+from docker.context.config import get_meta_dir
+from docker.context.config import get_meta_file
+from docker.context.config import get_tls_dir
+from docker.context.config import get_context_host
+
+
+class Context:
+ """A context."""
+
+ def __init__(self, name, orchestrator=None, host=None, endpoints=None,
+ tls=False):
+ if not name:
+ raise Exception("Name not provided")
+ self.name = name
+ self.context_type = None
+ self.orchestrator = orchestrator
+ self.endpoints = {}
+ self.tls_cfg = {}
+ self.meta_path = "IN MEMORY"
+ self.tls_path = "IN MEMORY"
+
+ if not endpoints:
+ # set default docker endpoint if no endpoint is set
+ default_endpoint = "docker" if (
+ not orchestrator or orchestrator == "swarm"
+ ) else orchestrator
+
+ self.endpoints = {
+ default_endpoint: {
+ "Host": get_context_host(host, tls),
+ "SkipTLSVerify": not tls
+ }
+ }
+ return
+
+ # check docker endpoints
+ for k, v in endpoints.items():
+ if not isinstance(v, dict):
+ # unknown format
+ raise ContextException("""Unknown endpoint format for
+ context {}: {}""".format(name, v))
+
+ self.endpoints[k] = v
+ if k != "docker":
+ continue
+
+ self.endpoints[k]["Host"] = v.get("Host", get_context_host(
+ host, tls))
+ self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
+ "SkipTLSVerify", not tls))
+
+ def set_endpoint(
+ self, name="docker", host=None, tls_cfg=None,
+ skip_tls_verify=False, def_namespace=None):
+ self.endpoints[name] = {
+ "Host": get_context_host(host, not skip_tls_verify),
+ "SkipTLSVerify": skip_tls_verify
+ }
+ if def_namespace:
+ self.endpoints[name]["DefaultNamespace"] = def_namespace
+
+ if tls_cfg:
+ self.tls_cfg[name] = tls_cfg
+
+ def inspect(self):
+ return self.__call__()
+
+ @classmethod
+ def load_context(cls, name):
+ meta = Context._load_meta(name)
+ if meta:
+ instance = cls(
+ meta["Name"],
+ orchestrator=meta["Metadata"].get("StackOrchestrator", None),
+ endpoints=meta.get("Endpoints", None))
+ instance.context_type = meta["Metadata"].get("Type", None)
+ instance._load_certs()
+ instance.meta_path = get_meta_dir(name)
+ return instance
+ return None
+
+ @classmethod
+ def _load_meta(cls, name):
+ meta_file = get_meta_file(name)
+ if not os.path.isfile(meta_file):
+ return None
+
+ metadata = {}
+ try:
+ with open(meta_file) as f:
+ metadata = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # unknown format
+ raise Exception("""Detected corrupted meta file for
+ context {} : {}""".format(name, e))
+
+ # for docker endpoints, set defaults for
+ # Host and SkipTLSVerify fields
+ for k, v in metadata["Endpoints"].items():
+ if k != "docker":
+ continue
+ metadata["Endpoints"][k]["Host"] = v.get(
+ "Host", get_context_host(None, False))
+ metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
+ v.get("SkipTLSVerify", True))
+
+ return metadata
+
+ def _load_certs(self):
+ certs = {}
+ tls_dir = get_tls_dir(self.name)
+ for endpoint in self.endpoints.keys():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ continue
+ ca_cert = None
+ cert = None
+ key = None
+ for filename in os.listdir(os.path.join(tls_dir, endpoint)):
+ if filename.startswith("ca"):
+ ca_cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("cert"):
+ cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("key"):
+ key = os.path.join(tls_dir, endpoint, filename)
+ if all([ca_cert, cert, key]):
+ verify = None
+ if endpoint == "docker" and not self.endpoints["docker"].get(
+ "SkipTLSVerify", False):
+ verify = True
+ certs[endpoint] = TLSConfig(
+ client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
+ self.tls_cfg = certs
+ self.tls_path = tls_dir
+
+ def save(self):
+ meta_dir = get_meta_dir(self.name)
+ if not os.path.isdir(meta_dir):
+ os.makedirs(meta_dir)
+ with open(get_meta_file(self.name), "w") as f:
+ f.write(json.dumps(self.Metadata))
+
+ tls_dir = get_tls_dir(self.name)
+ for endpoint, tls in self.tls_cfg.items():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ os.makedirs(os.path.join(tls_dir, endpoint))
+
+ ca_file = tls.ca_cert
+ if ca_file:
+ copyfile(ca_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(ca_file)))
+
+ if tls.cert:
+ cert_file, key_file = tls.cert
+ copyfile(cert_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(cert_file)))
+ copyfile(key_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(key_file)))
+
+ self.meta_path = get_meta_dir(self.name)
+ self.tls_path = get_tls_dir(self.name)
+
+ def remove(self):
+ if os.path.isdir(self.meta_path):
+ rmtree(self.meta_path)
+ if os.path.isdir(self.tls_path):
+ rmtree(self.tls_path)
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ def __str__(self):
+ return json.dumps(self.__call__(), indent=2)
+
+ def __call__(self):
+ result = self.Metadata
+ result.update(self.TLSMaterial)
+ result.update(self.Storage)
+ return result
+
+ def is_docker_host(self):
+ return self.context_type is None
+
+ @property
+ def Name(self):
+ return self.name
+
+ @property
+ def Host(self):
+ if not self.orchestrator or self.orchestrator == "swarm":
+ endpoint = self.endpoints.get("docker", None)
+ if endpoint:
+ return endpoint.get("Host", None)
+ return None
+
+ return self.endpoints[self.orchestrator].get("Host", None)
+
+ @property
+ def Orchestrator(self):
+ return self.orchestrator
+
+ @property
+ def Metadata(self):
+ meta = {}
+ if self.orchestrator:
+ meta = {"StackOrchestrator": self.orchestrator}
+ return {
+ "Name": self.name,
+ "Metadata": meta,
+ "Endpoints": self.endpoints
+ }
+
+ @property
+ def TLSConfig(self):
+ key = self.orchestrator
+ if not key or key == "swarm":
+ key = "docker"
+ if key in self.tls_cfg.keys():
+ return self.tls_cfg[key]
+ return None
+
+ @property
+ def TLSMaterial(self):
+ certs = {}
+ for endpoint, tls in self.tls_cfg.items():
+ cert, key = tls.cert
+ certs[endpoint] = list(
+ map(os.path.basename, [tls.ca_cert, cert, key]))
+ return {
+ "TLSMaterial": certs
+ }
+
+ @property
+ def Storage(self):
+ return {
+ "Storage": {
+ "MetadataPath": self.meta_path,
+ "TLSPath": self.tls_path
+ }}
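A sketch of building a ``Context`` directly (in memory until ``save()`` is called; with ``tls=False`` the docker endpoint defaults to ``SkipTLSVerify: true``):

    >>> from docker.context import Context
    >>> ctx = Context('test', host='tcp://127.0.0.1:2375')
    >>> ctx.Host
    'tcp://127.0.0.1:2375'
    >>> ctx.Metadata['Endpoints']['docker']['SkipTLSVerify']
    True
    >>> ctx.save()  # writes meta.json under the contexts meta dir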
diff --git a/docker/credentials/__init__.py b/docker/credentials/__init__.py
new file mode 100644
index 0000000..31ad28e
--- /dev/null
+++ b/docker/credentials/__init__.py
@@ -0,0 +1,4 @@
+# flake8: noqa
+from .store import Store
+from .errors import StoreError, CredentialsNotFound
+from .constants import *
diff --git a/docker/credentials/constants.py b/docker/credentials/constants.py
new file mode 100644
index 0000000..6a82d8d
--- /dev/null
+++ b/docker/credentials/constants.py
@@ -0,0 +1,4 @@
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'
diff --git a/docker/credentials/errors.py b/docker/credentials/errors.py
new file mode 100644
index 0000000..42a1bc1
--- /dev/null
+++ b/docker/credentials/errors.py
@@ -0,0 +1,25 @@
+class StoreError(RuntimeError):
+ pass
+
+
+class CredentialsNotFound(StoreError):
+ pass
+
+
+class InitializationError(StoreError):
+ pass
+
+
+def process_store_error(cpe, program):
+ message = cpe.output.decode('utf-8')
+ if 'credentials not found in native keychain' in message:
+ return CredentialsNotFound(
+ 'No matching credentials in {}'.format(
+ program
+ )
+ )
+ return StoreError(
+ 'Credentials store {} exited with "{}".'.format(
+ program, cpe.output.decode('utf-8').strip()
+ )
+ )
diff --git a/docker/credentials/store.py b/docker/credentials/store.py
new file mode 100644
index 0000000..e55976f
--- /dev/null
+++ b/docker/credentials/store.py
@@ -0,0 +1,94 @@
+import errno
+import json
+import subprocess
+
+from . import constants
+from . import errors
+from .utils import create_environment_dict
+from .utils import find_executable
+
+
+class Store:
+ def __init__(self, program, environment=None):
+ """ Create a store object that acts as an interface to
+ perform the basic operations for storing, retrieving
+ and erasing credentials using `program`.
+ """
+ self.program = constants.PROGRAM_PREFIX + program
+ self.exe = find_executable(self.program)
+ self.environment = environment
+ if self.exe is None:
+ raise errors.InitializationError(
+ '{} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+
+ def get(self, server):
+ """ Retrieve credentials for `server`. If no credentials are found,
+ a `StoreError` will be raised.
+ """
+ if not isinstance(server, bytes):
+ server = server.encode('utf-8')
+ data = self._execute('get', server)
+ result = json.loads(data.decode('utf-8'))
+
+ # docker-credential-pass will return an object for nonexistent servers
+ # whereas other helpers will exit with returncode != 0. For
+ # consistency, if no significant data is returned,
+ # raise CredentialsNotFound
+ if result['Username'] == '' and result['Secret'] == '':
+ raise errors.CredentialsNotFound(
+ f'No matching credentials in {self.program}'
+ )
+
+ return result
+
+ def store(self, server, username, secret):
+ """ Store credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ data_input = json.dumps({
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret
+ }).encode('utf-8')
+ return self._execute('store', data_input)
+
+ def erase(self, server):
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ if not isinstance(server, bytes):
+ server = server.encode('utf-8')
+ self._execute('erase', server)
+
+ def list(self):
+ """ List stored credentials. Requires v0.4.0+ of the helper.
+ """
+ data = self._execute('list', None)
+ return json.loads(data.decode('utf-8'))
+
+ def _execute(self, subcmd, data_input):
+ output = None
+ env = create_environment_dict(self.environment)
+ try:
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
+ except subprocess.CalledProcessError as e:
+ raise errors.process_store_error(e, self.program)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise errors.StoreError(
+ '{} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+ else:
+ raise errors.StoreError(
+ 'Unexpected OS error "{}", errno={}'.format(
+ e.strerror, e.errno
+ )
+ )
+ return output
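A sketch of driving a credential helper through ``Store`` (helper name and output are illustrative; the matching ``docker-credential-<name>`` binary must be on ``PATH``, otherwise ``InitializationError`` is raised):

    >>> from docker.credentials import Store
    >>> store = Store('desktop')
    >>> store.get('https://index.docker.io/v1/')
    {'ServerURL': 'https://index.docker.io/v1/', 'Username': 'me', 'Secret': '...'}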
diff --git a/docker/credentials/utils.py b/docker/credentials/utils.py
new file mode 100644
index 0000000..3f720ef
--- /dev/null
+++ b/docker/credentials/utils.py
@@ -0,0 +1,38 @@
+import distutils.spawn
+import os
+import sys
+
+
+def find_executable(executable, path=None):
+ """
+ As distutils.spawn.find_executable, but on Windows, look up
+ every extension declared in PATHEXT instead of just `.exe`
+ """
+ if sys.platform != 'win32':
+ return distutils.spawn.find_executable(executable, path)
+
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ for ext in extensions:
+ f = os.path.join(p, base + ext)
+ if os.path.isfile(f):
+ return f
+ return None
+ else:
+ return executable
+
+
+def create_environment_dict(overrides):
+ """
+ Create and return a copy of os.environ with the specified overrides
+ """
+ result = os.environ.copy()
+ result.update(overrides or {})
+ return result
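``create_environment_dict`` is what plumbs the ``credstore_env`` overrides into helper invocations; a trivial sketch:

    >>> from docker.credentials.utils import create_environment_dict
    >>> env = create_environment_dict({'DOCKER_CONFIG': '/tmp/cfg'})
    >>> env['DOCKER_CONFIG']  # a copy of os.environ with the override applied
    '/tmp/cfg'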
diff --git a/docker/errors.py b/docker/errors.py
index 0253695..ba95256 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -38,23 +38,25 @@ class APIError(requests.exceptions.HTTPError, DockerException):
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
- super(APIError, self).__init__(message)
+ super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
- message = super(APIError, self).__str__()
+ message = super().__str__()
if self.is_client_error():
- message = '{0} Client Error: {1}'.format(
- self.response.status_code, self.response.reason)
+ message = '{} Client Error for {}: {}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
elif self.is_server_error():
- message = '{0} Server Error: {1}'.format(
- self.response.status_code, self.response.reason)
+ message = '{} Server Error for {}: {}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
if self.explanation:
- message = '{0} ("{1}")'.format(message, self.explanation)
+ message = f'{message} ("{self.explanation}")'
return message
@@ -63,6 +65,9 @@ class APIError(requests.exceptions.HTTPError, DockerException):
if self.response is not None:
return self.response.status_code
+ def is_error(self):
+ return self.is_client_error() or self.is_server_error()
+
def is_client_error(self):
if self.status_code is None:
return False
@@ -128,11 +133,11 @@ class ContainerError(DockerException):
self.image = image
self.stderr = stderr
- err = ": {}".format(stderr) if stderr is not None else ""
+ err = f": {stderr}" if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
- super(ContainerError, self).__init__(msg)
+ super().__init__(msg)
class StreamParseError(RuntimeError):
@@ -142,7 +147,7 @@ class StreamParseError(RuntimeError):
class BuildError(DockerException):
def __init__(self, reason, build_log):
- super(BuildError, self).__init__(reason)
+ super().__init__(reason)
self.msg = reason
self.build_log = build_log
@@ -152,11 +157,43 @@ class ImageLoadError(DockerException):
def create_unexpected_kwargs_error(name, kwargs):
- quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
- text = ["{}() ".format(name)]
+ quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
+ text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
+
+
+class MissingContextParameter(DockerException):
+ def __init__(self, param):
+ self.param = param
+
+ def __str__(self):
+ return (f"missing parameter: {self.param}")
+
+
+class ContextAlreadyExists(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return (f"context {self.name} already exists")
+
+
+class ContextException(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class ContextNotFound(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return (f"context '{self.name}' not found")
diff --git a/docker/models/configs.py b/docker/models/configs.py
index 7f23f65..3588c8b 100644
--- a/docker/models/configs.py
+++ b/docker/models/configs.py
@@ -7,7 +7,7 @@ class Config(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
diff --git a/docker/models/containers.py b/docker/models/containers.py
index b33a718..957deed 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -15,7 +15,12 @@ from .resource import Collection, Model
class Container(Model):
-
+ """ Local representation of a container object. Detailed configuration may
+ be accessed through the :py:attr:`attrs` attribute. Note that local
+ attributes are cached; users may call :py:meth:`reload` to
+ query the Docker daemon for the current properties, causing
+ :py:attr:`attrs` to be refreshed.
+ """
@property
def name(self):
"""
@@ -57,6 +62,13 @@ class Container(Model):
return self.attrs['State']['Status']
return self.attrs['State']
+ @property
+ def ports(self):
+ """
+ The ports that the container exposes as a dictionary.
+ """
+ return self.attrs.get('NetworkSettings', {}).get('Ports', {})
+
def attach(self, **kwargs):
"""
Attach to this container.
@@ -139,7 +151,7 @@ class Container(Model):
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
- socket=False, environment=None, workdir=None):
+ socket=False, environment=None, workdir=None, demux=False):
"""
Run a command inside this container. Similar to
``docker exec``.
@@ -161,16 +173,18 @@ class Container(Model):
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
+ demux (bool): Return stdout and stderr separately
Returns:
(ExecResult): A tuple of (exit_code, output)
exit_code: (int):
Exit code for the executed command or ``None`` if
- either ``stream```or ``socket`` is ``True``.
- output: (generator or str):
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator, bytes, or tuple):
If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection.
- A string containing response data otherwise.
+ If ``demux=True``, a tuple of two bytes: stdout and stderr.
+ A bytestring containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
@@ -179,10 +193,11 @@ class Container(Model):
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment,
- workdir=workdir
+ workdir=workdir,
)
exec_output = self.client.api.exec_start(
- resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
+ demux=demux
)
if socket or stream:
return ExecResult(None, exec_output)
@@ -210,7 +225,8 @@ class Container(Model):
"""
return self.client.api.export(self.id, chunk_size)
- def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
@@ -220,6 +236,8 @@ class Container(Model):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -228,8 +246,20 @@ class Container(Model):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+
+ Example:
+
+ >>> f = open('./sh_bin.tar', 'wb')
+ >>> bits, stat = container.get_archive('/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
"""
- return self.client.api.get_archive(self.id, path, chunk_size)
+ return self.client.api.get_archive(self.id, path,
+ chunk_size, encode_stream)
def kill(self, signal=None):
"""
@@ -253,16 +283,16 @@ class Container(Model):
generator you can iterate over to retrieve log output as it happens.
Args:
- stdout (bool): Get ``STDOUT``
- stderr (bool): Get ``STDERR``
- stream (bool): Stream the response
- timestamps (bool): Show timestamps
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
- follow (bool): Follow log output
+ follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
@@ -380,7 +410,8 @@ class Container(Model):
Args:
decode (bool): If set to true, stream will be decoded into dicts
- on the fly. False by default.
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
@@ -521,12 +552,15 @@ class ContainerCollection(Collection):
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
+ cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
+ cpu_rt_period (int): Limit CPU real-time period in microseconds.
+ cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
@@ -549,6 +583,9 @@ class ContainerCollection(Collection):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
@@ -558,7 +595,7 @@ class ContainerCollection(Collection):
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
- extra_hosts (dict): Addtional hostnames to resolve inside the
+ extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
@@ -570,19 +607,17 @@ class ContainerCollection(Collection):
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
+ kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
- links (dict or list of tuples): Either a dictionary mapping name
- to alias or as a list of ``(name, alias)`` tuples.
- log_config (dict): Logging configuration, as a dictionary with
- keys:
-
- - ``type`` The logging driver name.
- - ``config`` A dictionary of configuration for the logging
- driver.
-
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration.
+ lxc_conf (dict): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
@@ -590,6 +625,7 @@ class ContainerCollection(Collection):
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
+ mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -613,6 +649,7 @@ class ContainerCollection(Collection):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+ This mode is incompatible with ``ports``.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
@@ -628,8 +665,8 @@ class ContainerCollection(Collection):
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
- ``port/protocol``, where the protocol is either ``tcp`` or
- ``udp``.
+ ``port/protocol``, where the protocol is either ``tcp``,
+ ``udp``, or ``sctp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
@@ -646,6 +683,7 @@ class ContainerCollection(Collection):
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
+ Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@@ -662,6 +700,7 @@ class ContainerCollection(Collection):
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
+ runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
@@ -691,13 +730,21 @@ class ContainerCollection(Collection):
}
tty (bool): Allocate a pseudo-TTY.
- ulimits (:py:class:`list`): Ulimits to set inside the container, as
- a list of dicts.
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of :py:class:`docker.types.Ulimit` instances.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
@@ -714,10 +761,17 @@ class ContainerCollection(Collection):
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+ Or a list of strings, each of which specifies a volume mount.
+
+ For example:
+
+ .. code-block:: python
+
+ ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
+
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
- runtime (str): Runtime to use with this container.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
@@ -863,7 +917,8 @@ class ContainerCollection(Collection):
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
@@ -932,8 +987,8 @@ RUN_CREATE_KWARGS = [
'stdin_open',
'stop_signal',
'tty',
+ 'use_config_proxy',
'user',
- 'volume_driver',
'working_dir',
]
@@ -960,6 +1015,7 @@ RUN_HOST_CONFIG_KWARGS = [
'device_write_bps',
'device_write_iops',
'devices',
+ 'device_requests',
'dns_opt',
'dns_search',
'dns',
@@ -995,7 +1051,9 @@ RUN_HOST_CONFIG_KWARGS = [
'tmpfs',
'ulimits',
'userns_mode',
+ 'uts_mode',
'version',
+ 'volume_driver',
'volumes_from',
'runtime'
]
diff --git a/docker/models/images.py b/docker/models/images.py
index 41632c6..46f8efe 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -1,7 +1,6 @@
import itertools
import re
-
-import six
+import warnings
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
@@ -16,7 +15,7 @@ class Image(Model):
An image on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+ return "<{}: '{}'>".format(self.__class__.__name__, "', '".join(self.tags))
@property
def labels(self):
@@ -59,14 +58,20 @@ class Image(Model):
"""
return self.client.api.history(self.id)
- def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
- chunk_size (int): The number of bytes returned by each iteration
- of the generator. If ``None``, data will be streamed as it is
- received. Default: 2 MB
+ chunk_size (int): The generator will return up to that much data
+ per iteration, but may return less. If ``None``, data will be
+ streamed as it is received. Default: 2 MB
+ named (str or bool): If ``False`` (default), the tarball will not
+ retain repository and tag information for this image. If set
+ to ``True``, the first tag in the :py:attr:`~tags` list will
+ be used to identify the image. Alternatively, any element of
+ the :py:attr:`~tags` list can be used as an argument to use
+ that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
@@ -77,13 +82,23 @@ class Image(Model):
Example:
- >>> image = cli.get_image("busybox:latest")
- >>> f = open('/tmp/busybox-latest.tar', 'w')
- >>> for chunk in image:
+ >>> image = cli.images.get("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image.save():
>>> f.write(chunk)
>>> f.close()
"""
- return self.client.api.get_image(self.id, chunk_size)
+ img = self.id
+ if named:
+ img = self.tags[0] if self.tags else img
+ if isinstance(named, str):
+ if named not in self.tags:
+ raise InvalidArgument(
+ f"{named} is not a valid tag for this image"
+ )
+ img = named
+
+ return self.client.api.get_image(img, chunk_size)
def tag(self, repository, tag=None, **kwargs):
"""
@@ -110,7 +125,7 @@ class RegistryData(Model):
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
- super(RegistryData, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.image_name = image_name
@property
@@ -163,7 +178,7 @@ class RegistryData(Model):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
- '"{0}" is not a valid platform descriptor'.format(platform)
+ f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
@@ -241,6 +256,10 @@ class ImageCollection(Collection):
platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build.
Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
Returns:
(tuple): The first item is the :py:class:`Image` object for the
@@ -256,7 +275,7 @@ class ImageCollection(Collection):
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
- if isinstance(resp, six.string_types):
+ if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
@@ -294,22 +313,26 @@ class ImageCollection(Collection):
"""
return self.prepare_model(self.client.api.inspect_image(name))
- def get_registry_data(self, name):
+ def get_registry_data(self, name, auth_config=None):
"""
Gets the registry data for an image.
Args:
name (str): The name of the image.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
Returns:
(:py:class:`RegistryData`): The data object.
+
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return RegistryData(
image_name=name,
- attrs=self.client.api.inspect_distribution(name),
+ attrs=self.client.api.inspect_distribution(name, auth_config),
client=self.client,
collection=self,
)
@@ -325,7 +348,8 @@ class ImageCollection(Collection):
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- - ``label`` (str): format either ``key`` or ``key=value``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
Returns:
(list of :py:class:`Image`): The images.
@@ -369,12 +393,13 @@ class ImageCollection(Collection):
return [self.get(i) for i in images]
- def pull(self, repository, tag=None, **kwargs):
+ def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
- If no tag is specified, all tags from that repository will be
- pulled.
+ If ``tag`` is ``None`` or empty, it is set to ``latest``.
+ If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
+ tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
@@ -383,15 +408,15 @@ class ImageCollection(Collection):
Args:
repository (str): The repository to pull
tag (str): The tag to pull
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.client.DockerClient.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
+ all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
- If no ``tag`` was specified, the method will return a list
+ If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
@@ -401,16 +426,30 @@ class ImageCollection(Collection):
Example:
>>> # Pull the image tagged `latest` in the busybox repo
- >>> image = client.images.pull('busybox:latest')
+ >>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
- >>> images = client.images.pull('busybox')
+ >>> images = client.images.pull('busybox', all_tags=True)
"""
- if not tag:
- repository, tag = parse_repository_tag(repository)
+ repository, image_tag = parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
- self.client.api.pull(repository, tag=tag, **kwargs)
- if tag:
+ if 'stream' in kwargs:
+ warnings.warn(
+ '`stream` is not a valid parameter for this method'
+ ' and will be overridden'
+ )
+ del kwargs['stream']
+
+ pull_log = self.client.api.pull(
+ repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
+ )
+ for _ in pull_log:
+ # We don't do anything with the logs, but we need
+ # to keep the connection alive and wait for the image
+ # to be pulled.
+ pass
+ if not all_tags:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))
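A sketch of the revised ``pull`` semantics and the new ``save(named=...)`` flag (repository is illustrative):

    >>> image = client.images.pull('busybox')               # now implies :latest
    >>> images = client.images.pull('busybox', all_tags=True)
    >>> with open('/tmp/busybox.tar', 'wb') as f:
    ...     for chunk in image.save(named=True):            # keep repo:tag info
    ...         f.write(chunk)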
diff --git a/docker/models/networks.py b/docker/models/networks.py
index be3291a..093deb7 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -46,6 +46,8 @@ class Network(Model):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`
@@ -190,7 +192,8 @@ class NetworkCollection(Collection):
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
index 0688018..37ecefb 100644
--- a/docker/models/plugins.py
+++ b/docker/models/plugins.py
@@ -7,7 +7,7 @@ class Plugin(Model):
A plugin on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -117,9 +117,8 @@ class Plugin(Model):
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
- for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
- yield d
- self._reload()
+ yield from self.client.api.upgrade_plugin(self.name, remote, privileges)
+ self.reload()
class PluginCollection(Collection):
diff --git a/docker/models/resource.py b/docker/models/resource.py
index ed3900a..dec2349 100644
--- a/docker/models/resource.py
+++ b/docker/models/resource.py
@@ -1,5 +1,4 @@
-
-class Model(object):
+class Model:
"""
A base class for representing a single object on the server.
"""
@@ -18,13 +17,13 @@ class Model(object):
self.attrs = {}
def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+ return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
- return hash("%s:%s" % (self.__class__.__name__, self.id))
+ return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
@@ -49,7 +48,7 @@ class Model(object):
self.attrs = new_model.attrs
-class Collection(object):
+class Collection:
"""
A base class for representing all objects of a particular type on the
server.
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
index ca11ede..da01d44 100644
--- a/docker/models/secrets.py
+++ b/docker/models/secrets.py
@@ -7,7 +7,7 @@ class Secret(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -30,6 +30,7 @@ class SecretCollection(Collection):
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
diff --git a/docker/models/services.py b/docker/models/services.py
index 458d2c8..200dd33 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -1,6 +1,6 @@
import copy
from docker.errors import create_unexpected_kwargs_error, InvalidArgument
-from docker.types import TaskTemplate, ContainerSpec, ServiceMode
+from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode
from .resource import Model, Collection
@@ -42,7 +42,7 @@ class Service(Model):
``label``, and ``desired-state``.
Returns:
- (:py:class:`list`): List of task dictionaries.
+ :py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
@@ -84,26 +84,27 @@ class Service(Model):
def logs(self, **kwargs):
"""
- Get log stream for the service.
- Note: This method works only for services with the ``json-file``
- or ``journald`` logging drivers.
-
- Args:
- details (bool): Show extra details provided to logs.
- Default: ``False``
- follow (bool): Keep connection open to read logs as they are
- sent by the Engine. Default: ``False``
- stdout (bool): Return logs from ``stdout``. Default: ``False``
- stderr (bool): Return logs from ``stderr``. Default: ``False``
- since (int): UNIX timestamp for the logs staring point.
- Default: 0
- timestamps (bool): Add timestamps to every log line.
- tail (string or int): Number of log lines to be returned,
- counting from the current end of the logs. Specify an
- integer or ``'all'`` to output all log lines.
- Default: ``all``
-
- Returns (generator): Logs for the service.
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the starting point of the logs.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ generator: Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
@@ -118,7 +119,7 @@ class Service(Model):
replicas (int): The number of containers that should be running.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
if 'Global' in self.attrs['Spec']['Mode'].keys():
@@ -134,7 +135,7 @@ class Service(Model):
Force update the service even if no changes require it.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
return self.update(force_update=True, fetch_current_spec=True)
@@ -152,13 +153,22 @@ class ServiceCollection(Collection):
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
- constraints (list of str): Placement constraints.
+ constraints (list of str): :py:class:`~docker.types.Placement`
+ constraints.
+ preferences (list of tuple): :py:class:`~docker.types.Placement`
+ preferences.
+ maxreplicas (int): Maximum number of replicas per node, applied
+ through :py:class:`~docker.types.Placement`.
+ platforms (list of tuple): A list of platform constraints
+ expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
+ init (boolean): Run an init inside the container that forwards
+ signals and reaps processes
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
labels (dict): Labels to apply to the service.
@@ -170,16 +180,19 @@ class ServiceCollection(Collection):
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
- networks (list of str): List of network names or IDs to attach
- the service to. Default: ``None``.
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
- secrets (list of :py:class:`docker.types.SecretReference`): List
+ secrets (list of :py:class:`~docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
@@ -195,13 +208,18 @@ class ServiceCollection(Collection):
the container's `hosts` file.
dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file.
- configs (:py:class:`list`): List of :py:class:`ConfigReference`
- that will be exposed to the service.
+ configs (:py:class:`list`): List of
+ :py:class:`~docker.types.ConfigReference` that will be exposed
+ to the service.
privileges (Privileges): Security options for the service's
containers.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to
+ the default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop
+ from the default set for the container.
Returns:
- (:py:class:`Service`) The created service.
+ :py:class:`Service`: The created service.
Raises:
:py:class:`docker.errors.APIError`
@@ -223,7 +241,7 @@ class ServiceCollection(Collection):
into the output.
Returns:
- (:py:class:`Service`): The service.
+ :py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
@@ -248,7 +266,7 @@ class ServiceCollection(Collection):
Default: ``None``.
Returns:
- (list of :py:class:`Service`): The services.
+ list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
@@ -263,6 +281,8 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
+ 'cap_add',
+ 'cap_drop',
'command',
'configs',
'dns_config',
@@ -272,6 +292,7 @@ CONTAINER_SPEC_KWARGS = [
'hostname',
'hosts',
'image',
+ 'init',
'isolation',
'labels',
'mounts',
@@ -302,6 +323,13 @@ CREATE_SERVICE_KWARGS = [
'endpoint_spec',
]
+PLACEMENT_KWARGS = [
+ 'constraints',
+ 'preferences',
+ 'platforms',
+ 'maxreplicas',
+]
+
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
@@ -321,10 +349,12 @@ def _get_create_service_kwargs(func_name, kwargs):
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
- if 'constraints' in kwargs:
- task_template_kwargs['placement'] = {
- 'Constraints': kwargs.pop('constraints')
- }
+ placement = {}
+ for key in copy.copy(kwargs):
+ if key in PLACEMENT_KWARGS:
+ placement[key] = kwargs.pop(key)
+ placement = Placement(**placement)
+ task_template_kwargs['placement'] = placement
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index 7396e73..b0b1a2e 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -11,7 +11,7 @@ class Swarm(Model):
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
- super(Swarm, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()
@@ -34,7 +34,8 @@ class Swarm(Model):
get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, **kwargs):
+ force_new_cluster=False, default_addr_pool=None,
+ subnet_size=None, data_path_addr=None, **kwargs):
"""
Initialize a new swarm on this Engine.
@@ -56,6 +57,14 @@ class Swarm(Model):
is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
+ default_addr_pool (list of str): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
task_history_retention_limit (int): Maximum number of tasks
history stored.
snapshot_interval (int): Number of logs entries between snapshot.
@@ -89,7 +98,7 @@ class Swarm(Model):
created in the orchestrator.
Returns:
- ``True`` if the request went through.
+ (str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
@@ -99,7 +108,8 @@ class Swarm(Model):
>>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
- force_new_cluster=False, snapshot_interval=5000,
+                force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
+ subnet_size=24, snapshot_interval=5000,
log_entries_for_slow_followers=1200
)
@@ -107,11 +117,15 @@ class Swarm(Model):
init_kwargs = {
'advertise_addr': advertise_addr,
'listen_addr': listen_addr,
- 'force_new_cluster': force_new_cluster
+ 'force_new_cluster': force_new_cluster,
+ 'default_addr_pool': default_addr_pool,
+ 'subnet_size': subnet_size,
+ 'data_path_addr': data_path_addr,
}
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
- self.client.api.init_swarm(**init_kwargs)
+ node_id = self.client.api.init_swarm(**init_kwargs)
self.reload()
+ return node_id
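With this change the model surfaces the node ID that ``init_swarm`` already
returns; a short sketch, reusing the address-pool values from the docstring
example above:

    >>> node_id = client.swarm.init(
    ...     advertise_addr='eth0', default_addr_pool=['10.20.0.0/16'],
    ...     subnet_size=24)
    >>> client.nodes.get(node_id)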
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
@@ -137,7 +151,7 @@ class Swarm(Model):
unlock.__doc__ = APIClient.unlock_swarm.__doc__
def update(self, rotate_worker_token=False, rotate_manager_token=False,
- **kwargs):
+ rotate_manager_unlock_key=False, **kwargs):
"""
Update the swarm's configuration.
@@ -150,7 +164,8 @@ class Swarm(Model):
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
-
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
@@ -164,5 +179,6 @@ class Swarm(Model):
version=self.version,
swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token,
- rotate_manager_token=rotate_manager_token
+ rotate_manager_token=rotate_manager_token,
+ rotate_manager_unlock_key=rotate_manager_unlock_key
)
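A brief usage sketch for the new flag, assuming the client is a swarm
manager with autolock enabled:

    >>> client.swarm.update(rotate_manager_unlock_key=True)
    >>> key = client.swarm.get_unlock_key()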
diff --git a/docker/tls.py b/docker/tls.py
index 4900e9f..067d556 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -2,10 +2,10 @@ import os
import ssl
from . import errors
-from .transport import SSLAdapter
+from .transport import SSLHTTPAdapter
-class TLSConfig(object):
+class TLSConfig:
"""
TLS configuration.
@@ -32,7 +32,7 @@ class TLSConfig(object):
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
- # leaving tls_verify=False
+ # leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -62,7 +62,7 @@ class TLSConfig(object):
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
- # "tls" and "tls_verify" must have both or neither cert/key files In
+ # "client_cert" must have both or neither cert/key files. In
        # either case, alert the user when both are expected, but any are
# missing.
@@ -71,7 +71,7 @@ class TLSConfig(object):
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
- 'client_config must be a tuple of'
+ 'client_cert must be a tuple of'
' (client certificate, key file)'
)
@@ -79,7 +79,7 @@ class TLSConfig(object):
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
- ' through the client_config param'
+ ' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
@@ -88,7 +88,7 @@ class TLSConfig(object):
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
- 'Invalid CA certificate provided for `tls_ca_cert`.'
+ 'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
@@ -105,7 +105,7 @@ class TLSConfig(object):
if self.cert:
client.cert = self.cert
- client.mount('https://', SSLAdapter(
+ client.mount('https://', SSLHTTPAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
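A minimal sketch of the corrected parameter names, assuming certificate
files exist at the paths shown (paths and address are illustrative):

    >>> import docker
    >>> from docker.tls import TLSConfig
    >>> tls_config = TLSConfig(
    ...     client_cert=('/path/cert.pem', '/path/key.pem'),
    ...     ca_cert='/path/ca.pem', verify=True)
    >>> client = docker.DockerClient(
    ...     base_url='tcp://127.0.0.1:2376', tls=tls_config)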
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
index abbee18..e37fc3b 100644
--- a/docker/transport/__init__.py
+++ b/docker/transport/__init__.py
@@ -1,8 +1,13 @@
# flake8: noqa
-from .unixconn import UnixAdapter
-from .ssladapter import SSLAdapter
+from .unixconn import UnixHTTPAdapter
+from .ssladapter import SSLHTTPAdapter
try:
- from .npipeconn import NpipeAdapter
+ from .npipeconn import NpipeHTTPAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
+
+try:
+ from .sshconn import SSHHTTPAdapter
+except ImportError:
+ pass
diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py
new file mode 100644
index 0000000..dfbb193
--- /dev/null
+++ b/docker/transport/basehttpadapter.py
@@ -0,0 +1,8 @@
+import requests.adapters
+
+
+class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
+ def close(self):
+ super().close()
+ if hasattr(self, 'pools'):
+ self.pools.clear()
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index ab9b904..df67f21 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -1,13 +1,11 @@
-import six
+import queue
import requests.adapters
+from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
+import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@@ -17,9 +15,9 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class NpipeHTTPConnection(httplib.HTTPConnection, object):
+class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
- super(NpipeHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
@@ -34,7 +32,7 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
- super(NpipeHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
@@ -56,7 +54,7 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
- except six.moves.queue.Empty:
+ except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
@@ -68,20 +66,23 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
return conn or self._new_conn()
-class NpipeAdapter(requests.adapters.HTTPAdapter):
+class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
- 'timeout']
+ 'timeout',
+ 'max_pool_size']
def __init__(self, base_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(NpipeAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -90,7 +91,8 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
return pool
pool = NpipeHTTPConnectionPool(
- self.npipe_path, self.timeout
+ self.npipe_path, self.timeout,
+ maxsize=self.max_pool_size
)
self.pools[url] = pool
@@ -103,6 +105,3 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
# anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
return request.path_url
-
- def close(self):
- self.pools.clear()
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index c04b39d..766372a 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -1,7 +1,7 @@
import functools
+import time
import io
-import six
import win32file
import win32pipe
@@ -9,7 +9,7 @@ cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
-RETRY_WAIT_TIMEOUT = 10000
+MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@@ -23,7 +23,7 @@ def check_closed(f):
return wrapped
-class NpipeSocket(object):
+class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
@@ -46,8 +46,7 @@ class NpipeSocket(object):
self._closed = True
@check_closed
- def connect(self, address):
- win32pipe.WaitNamedPipe(address, self._timeout)
+ def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
@@ -65,8 +64,10 @@ class NpipeSocket(object):
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
- win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
- return self.connect(address)
+ retry_count = retry_count + 1
+ if (retry_count < MAXIMUM_RETRY_COUNT):
+ time.sleep(1)
+ return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
@@ -87,10 +88,6 @@ class NpipeSocket(object):
def dup(self):
return NpipeSocket(self._handle)
- @check_closed
- def fileno(self):
- return int(self._handle)
-
def getpeername(self):
return self._address
@@ -130,9 +127,6 @@ class NpipeSocket(object):
@check_closed
def recv_into(self, buf, nbytes=0):
- if six.PY2:
- return self._recv_into_py2(buf, nbytes)
-
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
@@ -197,7 +191,7 @@ class NpipeFileIOBase(io.RawIOBase):
self.sock = npipe_socket
def close(self):
- super(NpipeFileIOBase, self).close()
+ super().close()
self.sock = None
def fileno(self):
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py
new file mode 100644
index 0000000..8e6beb2
--- /dev/null
+++ b/docker/transport/sshconn.py
@@ -0,0 +1,255 @@
+import paramiko
+import queue
+import urllib.parse
+import requests.adapters
+import logging
+import os
+import signal
+import socket
+import subprocess
+
+from docker.transport.basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+import http.client as httplib
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class SSHSocket(socket.socket):
+ def __init__(self, host):
+ super().__init__(
+ socket.AF_INET, socket.SOCK_STREAM)
+ self.host = host
+ self.port = None
+ self.user = None
+ if ':' in self.host:
+ self.host, self.port = self.host.split(':')
+ if '@' in self.host:
+ self.user, self.host = self.host.split('@')
+
+ self.proc = None
+
+ def connect(self, **kwargs):
+ args = ['ssh']
+ if self.user:
+ args = args + ['-l', self.user]
+
+ if self.port:
+ args = args + ['-p', self.port]
+
+ args = args + ['--', self.host, 'docker system dial-stdio']
+
+ preexec_func = None
+ if not constants.IS_WINDOWS_PLATFORM:
+ def f():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ preexec_func = f
+
+ env = dict(os.environ)
+
+ # drop LD_LIBRARY_PATH and SSL_CERT_FILE
+ env.pop('LD_LIBRARY_PATH', None)
+ env.pop('SSL_CERT_FILE', None)
+
+ self.proc = subprocess.Popen(
+ ' '.join(args),
+ env=env,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=None if constants.IS_WINDOWS_PLATFORM else preexec_func)
+
+ def _write(self, data):
+ if not self.proc or self.proc.stdin.closed:
+            raise Exception('SSH subprocess not initiated. '
+                            'connect() must be called first.')
+ written = self.proc.stdin.write(data)
+ self.proc.stdin.flush()
+ return written
+
+ def sendall(self, data):
+ self._write(data)
+
+ def send(self, data):
+ return self._write(data)
+
+ def recv(self, n):
+ if not self.proc:
+            raise Exception('SSH subprocess not initiated. '
+                            'connect() must be called first.')
+ return self.proc.stdout.read(n)
+
+ def makefile(self, mode):
+ if not self.proc:
+ self.connect()
+ self.proc.stdout.channel = self
+
+ return self.proc.stdout
+
+ def close(self):
+ if not self.proc or self.proc.stdin.closed:
+ return
+ self.proc.stdin.write(b'\n\n')
+ self.proc.stdin.flush()
+ self.proc.terminate()
+
+
+class SSHConnection(httplib.HTTPConnection):
+ def __init__(self, ssh_transport=None, timeout=60, host=None):
+ super().__init__(
+ 'localhost', timeout=timeout
+ )
+ self.ssh_transport = ssh_transport
+ self.timeout = timeout
+ self.ssh_host = host
+
+ def connect(self):
+ if self.ssh_transport:
+ sock = self.ssh_transport.open_session()
+ sock.settimeout(self.timeout)
+ sock.exec_command('docker system dial-stdio')
+ else:
+ sock = SSHSocket(self.ssh_host)
+ sock.settimeout(self.timeout)
+ sock.connect()
+
+ self.sock = sock
+
+
+class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ scheme = 'ssh'
+
+ def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
+ super().__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.ssh_transport = None
+ self.timeout = timeout
+ if ssh_client:
+ self.ssh_transport = ssh_client.get_transport()
+ self.ssh_host = host
+
+ def _new_conn(self):
+ return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
+
+ # When re-using connections, urllib3 calls fileno() on our
+ # SSH channel instance, quickly overloading our fd limit. To avoid this,
+ # we override _get_conn
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ )
+ pass # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class SSHHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
+ 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
+ ]
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
+ shell_out=False):
+ self.ssh_client = None
+ if not shell_out:
+ self._create_paramiko_client(base_url)
+ self._connect()
+
+ self.ssh_host = base_url
+ if base_url.startswith('ssh://'):
+ self.ssh_host = base_url[len('ssh://'):]
+
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super().__init__()
+
+ def _create_paramiko_client(self, base_url):
+ logging.getLogger("paramiko").setLevel(logging.WARNING)
+ self.ssh_client = paramiko.SSHClient()
+ base_url = urllib.parse.urlparse(base_url)
+ self.ssh_params = {
+ "hostname": base_url.hostname,
+ "port": base_url.port,
+ "username": base_url.username
+ }
+ ssh_config_file = os.path.expanduser("~/.ssh/config")
+ if os.path.exists(ssh_config_file):
+ conf = paramiko.SSHConfig()
+ with open(ssh_config_file) as f:
+ conf.parse(f)
+ host_config = conf.lookup(base_url.hostname)
+ if 'proxycommand' in host_config:
+ self.ssh_params["sock"] = paramiko.ProxyCommand(
+                    host_config['proxycommand']
+ )
+ if 'hostname' in host_config:
+ self.ssh_params['hostname'] = host_config['hostname']
+ if base_url.port is None and 'port' in host_config:
+ self.ssh_params['port'] = host_config['port']
+ if base_url.username is None and 'user' in host_config:
+ self.ssh_params['username'] = host_config['user']
+ if 'identityfile' in host_config:
+ self.ssh_params['key_filename'] = host_config['identityfile']
+
+ self.ssh_client.load_system_host_keys()
+ self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ def _connect(self):
+ if self.ssh_client:
+ self.ssh_client.connect(**self.ssh_params)
+
+ def get_connection(self, url, proxies=None):
+ if not self.ssh_client:
+ return SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+        # Connection is closed; try to reconnect
+ if self.ssh_client and not self.ssh_client.get_transport():
+ self._connect()
+
+ pool = SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def close(self):
+ super().close()
+ if self.ssh_client:
+ self.ssh_client.close()
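A short sketch of the new SSH transport, assuming paramiko is installed and
the remote host runs a Docker daemon reachable over SSH (host and user are
illustrative); the ``shell_out=True`` path instead drives the local ``ssh``
binary via ``docker system dial-stdio``:

    >>> import docker
    >>> client = docker.APIClient(base_url='ssh://user@remote-host:22')
    >>> client.version()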
diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py
index 8fafec3..31e3014 100644
--- a/docker/transport/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -7,6 +7,8 @@ import sys
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
+from docker.transport.basehttpadapter import BaseHTTPAdapter
+
try:
import requests.packages.urllib3 as urllib3
except ImportError:
@@ -22,7 +24,7 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5:
urllib3.connection.match_hostname = match_hostname
-class SSLAdapter(HTTPAdapter):
+class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
@@ -34,7 +36,7 @@ class SSLAdapter(HTTPAdapter):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- super(SSLAdapter, self).__init__(**kwargs)
+ super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
@@ -57,7 +59,7 @@ class SSLAdapter(HTTPAdapter):
But we still need to take care of when there is a proxy poolmanager
"""
- conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
+ conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index c59821a..1b00762 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -1,8 +1,8 @@
-import six
import requests.adapters
import socket
-from six.moves import http_client as httplib
+import http.client as httplib
+from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
try:
@@ -14,27 +14,15 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class UnixHTTPResponse(httplib.HTTPResponse, object):
- def __init__(self, sock, *args, **kwargs):
- disable_buffering = kwargs.pop('disable_buffering', False)
- if six.PY2:
- # FIXME: We may need to disable buffering on Py3 as well,
- # but there's no clear way to do it at the moment. See:
- # https://github.com/docker/docker-py/issues/1799
- kwargs['buffering'] = not disable_buffering
- super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
-
-
-class UnixHTTPConnection(httplib.HTTPConnection, object):
+class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
- super(UnixHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
- self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -43,20 +31,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.sock = sock
def putheader(self, header, *values):
- super(UnixHTTPConnection, self).putheader(header, *values)
- if header == 'Connection' and 'Upgrade' in values:
- self.disable_buffering = True
+ super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
- if self.disable_buffering:
- kwargs['disable_buffering'] = True
-
- return UnixHTTPResponse(sock, *args, **kwargs)
+ return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
- super(UnixHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
@@ -69,23 +52,26 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
)
-class UnixAdapter(requests.adapters.HTTPAdapter):
+class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
- 'timeout']
+ 'timeout',
+ 'max_pool_size']
def __init__(self, socket_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(UnixAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -94,7 +80,8 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
return pool
pool = UnixHTTPConnectionPool(
- url, self.socket_path, self.timeout
+ url, self.socket_path, self.timeout,
+ maxsize=self.max_pool_size
)
self.pools[url] = pool
@@ -107,6 +94,3 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
-
- def close(self):
- self.pools.clear()
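A usage sketch for the new pool-size knob, assuming the client constructor
forwards ``max_pool_size`` to these adapters (introduced alongside this
change):

    >>> client = docker.APIClient(
    ...     base_url='unix:///var/run/docker.sock', max_pool_size=20)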
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 0b0d847..b425746 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,11 +1,14 @@
# flake8: noqa
-from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .containers import (
+ ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
+)
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
- Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
- ServiceMode, TaskTemplate, UpdateConfig
+ Mount, Placement, PlacementPreference, Privileges, Resources,
+ RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate,
+ UpdateConfig, NetworkAttachmentConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/base.py b/docker/types/base.py
index 6891062..8851f1e 100644
--- a/docker/types/base.py
+++ b/docker/types/base.py
@@ -1,7 +1,4 @@
-import six
-
-
class DictType(dict):
def __init__(self, init):
- for k, v in six.iteritems(init):
+ for k, v in init.items():
self[k] = v
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 2521420..f1b60b2 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
@@ -10,7 +8,7 @@ from .base import DictType
from .healthcheck import Healthcheck
-class LogConfigTypesEnum(object):
+class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
@@ -23,6 +21,35 @@ class LogConfigTypesEnum(object):
class LogConfig(DictType):
+ """
+ Configure logging for a container, when provided as an argument to
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+ You may refer to the
+ `official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
+ for more information.
+
+ Args:
+ type (str): Indicate which log driver to use. A set of valid drivers
+ is provided as part of the :py:attr:`LogConfig.types`
+ enum. Other values may be accepted depending on the engine version
+ and available logging plugins.
+ config (dict): A driver-dependent configuration dictionary. Please
+ refer to the driver's documentation for a list of valid config
+ keys.
+
+ Example:
+
+ >>> from docker.types import LogConfig
+ >>> lc = LogConfig(type=LogConfig.types.JSON, config={
+ ... 'max-size': '1g',
+ ... 'labels': 'production_status,geo'
+ ... })
+ >>> hc = client.create_host_config(log_config=lc)
+ >>> container = client.create_container('busybox', 'true',
+ ... host_config=hc)
+ >>> client.inspect_container(container)['HostConfig']['LogConfig']
+ {'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}}
+ """ # noqa: E501
types = LogConfigTypesEnum
def __init__(self, **kwargs):
@@ -32,7 +59,7 @@ class LogConfig(DictType):
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
- super(LogConfig, self).__init__({
+ super().__init__({
'Type': log_driver_type,
'Config': config
})
@@ -50,25 +77,51 @@ class LogConfig(DictType):
return self['Config']
def set_config_value(self, key, value):
+ """ Set a the value for ``key`` to ``value`` inside the ``config``
+ dict.
+ """
self.config[key] = value
def unset_config(self, key):
+ """ Remove the ``key`` property from the ``config`` dict. """
if key in self.config:
del self.config[key]
class Ulimit(DictType):
+ """
+ Create a ulimit declaration to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+        name (str): Which ulimit this applies to. Valid names can be
+            found in '/etc/security/limits.conf' on a GNU/Linux system.
+ soft (int): The soft limit for this ulimit. Optional.
+ hard (int): The hard limit for this ulimit. Optional.
+
+ Example:
+
+ >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
+ >>> hc = client.create_host_config(ulimits=[nproc_limit])
+ >>> container = client.create_container(
+ 'busybox', 'true', host_config=hc
+ )
+ >>> client.inspect_container(container)['HostConfig']['Ulimits']
+ [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
+
+ """
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
- if not isinstance(name, six.string_types):
+ if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
- super(Ulimit, self).__init__({
+ super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
@@ -99,6 +152,104 @@ class Ulimit(DictType):
self['Hard'] = value
+class DeviceRequest(DictType):
+ """
+ Create a device request to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+ driver (str): Which driver to use for this device. Optional.
+        count (int): Number of devices to request. Optional.
+ Set to -1 to request all available devices.
+ device_ids (list): List of strings for device IDs. Optional.
+ Set either ``count`` or ``device_ids``.
+ capabilities (list): List of lists of strings to request
+ capabilities. Optional. The global list acts like an OR,
+ and the sub-lists are AND. The driver will try to satisfy
+ one of the sub-lists.
+ Available capabilities for the ``nvidia`` driver can be found
+ `here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
+ options (dict): Driver-specific options. Optional.
+ """
+
+ def __init__(self, **kwargs):
+ driver = kwargs.get('driver', kwargs.get('Driver'))
+ count = kwargs.get('count', kwargs.get('Count'))
+ device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
+ capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
+ options = kwargs.get('options', kwargs.get('Options'))
+
+ if driver is None:
+ driver = ''
+ elif not isinstance(driver, str):
+ raise ValueError('DeviceRequest.driver must be a string')
+ if count is None:
+ count = 0
+ elif not isinstance(count, int):
+ raise ValueError('DeviceRequest.count must be an integer')
+ if device_ids is None:
+ device_ids = []
+ elif not isinstance(device_ids, list):
+ raise ValueError('DeviceRequest.device_ids must be a list')
+ if capabilities is None:
+ capabilities = []
+ elif not isinstance(capabilities, list):
+ raise ValueError('DeviceRequest.capabilities must be a list')
+ if options is None:
+ options = {}
+ elif not isinstance(options, dict):
+ raise ValueError('DeviceRequest.options must be a dict')
+
+ super().__init__({
+ 'Driver': driver,
+ 'Count': count,
+ 'DeviceIDs': device_ids,
+ 'Capabilities': capabilities,
+ 'Options': options
+ })
+
+ @property
+ def driver(self):
+ return self['Driver']
+
+ @driver.setter
+ def driver(self, value):
+ self['Driver'] = value
+
+ @property
+ def count(self):
+ return self['Count']
+
+ @count.setter
+ def count(self, value):
+ self['Count'] = value
+
+ @property
+ def device_ids(self):
+ return self['DeviceIDs']
+
+ @device_ids.setter
+ def device_ids(self, value):
+ self['DeviceIDs'] = value
+
+ @property
+ def capabilities(self):
+ return self['Capabilities']
+
+ @capabilities.setter
+ def capabilities(self, value):
+ self['Capabilities'] = value
+
+ @property
+ def options(self):
+ return self['Options']
+
+ @options.setter
+ def options(self, value):
+ self['Options'] = value
+
+
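A minimal sketch of requesting GPUs through the new type, assuming an
engine at API 1.40+ with the ``nvidia`` runtime available, and assuming
``containers.run`` forwards ``device_requests`` to the host config (image
name is illustrative):

    >>> from docker.types import DeviceRequest
    >>> client.containers.run(
    ...     'nvidia/cuda:10.2-base', 'nvidia-smi',
    ...     device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])])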
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
@@ -115,13 +266,13 @@ class HostConfig(dict):
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
- cpuset_cpus=None, userns_mode=None, pids_limit=None,
- isolation=None, auto_remove=False, storage_opt=None,
- init=None, init_path=None, volume_driver=None,
- cpu_count=None, cpu_percent=None, nano_cpus=None,
- cpuset_mems=None, runtime=None, mounts=None,
+ cpuset_cpus=None, userns_mode=None, uts_mode=None,
+ pids_limit=None, isolation=None, auto_remove=False,
+ storage_opt=None, init=None, init_path=None,
+ volume_driver=None, cpu_count=None, cpu_percent=None,
+ nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
- device_cgroup_rules=None):
+ device_cgroup_rules=None, device_requests=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@@ -144,7 +295,7 @@ class HostConfig(dict):
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
- if isinstance(shm_size, six.string_types):
+ if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
@@ -181,10 +332,11 @@ class HostConfig(dict):
if dns_search:
self['DnsSearch'] = dns_search
- if network_mode:
- self['NetworkMode'] = network_mode
- elif network_mode is None:
- self['NetworkMode'] = 'default'
+ if network_mode == 'host' and port_bindings:
+ raise host_config_incompatible_error(
+ 'network_mode', 'host', 'port_bindings'
+ )
+ self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
@@ -204,7 +356,7 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices)
if group_add:
- self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+ self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
@@ -224,11 +376,11 @@ class HostConfig(dict):
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- self['Sysctls'][k] = six.text_type(v)
+ for k, v in sysctls.items():
+ self['Sysctls'][k] = str(v)
if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
+ if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
@@ -250,7 +402,7 @@ class HostConfig(dict):
if isinstance(lxc_conf, dict):
formatted = []
- for k, v in six.iteritems(lxc_conf):
+ for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
@@ -264,10 +416,10 @@ class HostConfig(dict):
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
- for l in ulimits:
- if not isinstance(l, Ulimit):
- l = Ulimit(**l)
- self['Ulimits'].append(l)
+ for lmt in ulimits:
+ if not isinstance(lmt, Ulimit):
+ lmt = Ulimit(**lmt)
+ self['Ulimits'].append(lmt)
if log_config is not None:
if not isinstance(log_config, LogConfig):
@@ -392,6 +544,11 @@ class HostConfig(dict):
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
+ if uts_mode:
+ if uts_mode != "host":
+ raise host_config_value_error("uts_mode", uts_mode)
+ self['UTSMode'] = uts_mode
+
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
@@ -400,7 +557,7 @@ class HostConfig(dict):
self["PidsLimit"] = pids_limit
if isolation:
- if not isinstance(isolation, six.string_types):
+ if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
@@ -450,7 +607,7 @@ class HostConfig(dict):
self['CpuPercent'] = cpu_percent
if nano_cpus:
- if not isinstance(nano_cpus, six.integer_types):
+ if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
@@ -476,6 +633,19 @@ class HostConfig(dict):
)
self['DeviceCgroupRules'] = device_cgroup_rules
+ if device_requests is not None:
+ if version_lt(version, '1.40'):
+ raise host_config_version_error('device_requests', '1.40')
+ if not isinstance(device_requests, list):
+ raise host_config_type_error(
+ 'device_requests', device_requests, 'list'
+ )
+ self['DeviceRequests'] = []
+ for req in device_requests:
+ if not isinstance(req, DeviceRequest):
+ req = DeviceRequest(**req)
+ self['DeviceRequests'].append(req)
+
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
@@ -493,6 +663,13 @@ def host_config_value_error(param, param_value):
return ValueError(error_msg.format(param, param_value))
+def host_config_incompatible_error(param, param_value, incompatible_param):
+ error_msg = '\"{1}\" {0} is incompatible with {2}'
+ return errors.InvalidArgument(
+ error_msg.format(param, param_value, incompatible_param)
+ )
+
+
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
@@ -520,17 +697,17 @@ class ContainerConfig(dict):
'version 1.29'
)
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
- if isinstance(entrypoint, six.string_types):
+ if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
+ labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
@@ -541,10 +718,10 @@ class ContainerConfig(dict):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
@@ -573,7 +750,7 @@ class ContainerConfig(dict):
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
- 'User': six.text_type(user) if user else None,
+ 'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
index ee8624e..10e8101 100644
--- a/docker/types/daemon.py
+++ b/docker/types/daemon.py
@@ -5,15 +5,17 @@ try:
except ImportError:
import urllib3
+from ..errors import DockerException
-class CancellableStream(object):
+
+class CancellableStream:
"""
Stream wrapper for real-time events, logs, etc. from the server.
Example:
>>> events = client.events()
>>> for event in events:
- ... print event
+ ... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
@@ -30,7 +32,7 @@ class CancellableStream(object):
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration
- except socket.error:
+ except OSError:
raise StopIteration
next = __next__
@@ -55,9 +57,17 @@ class CancellableStream(object):
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
+ elif hasattr(sock_fp, 'channel'):
+ # We're working with a paramiko (SSH) channel, which doesn't
+ # support cancelable streams with the current implementation
+ raise DockerException(
+ 'Cancellable streams not supported for the SSH protocol'
+ )
else:
sock = sock_fp._sock
- if isinstance(sock, urllib3.contrib.pyopenssl.WrappedSocket):
+
+ if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
+ sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
index 61857c2..dfc88a9 100644
--- a/docker/types/healthcheck.py
+++ b/docker/types/healthcheck.py
@@ -1,7 +1,5 @@
from .base import DictType
-import six
-
class Healthcheck(DictType):
"""
@@ -14,7 +12,7 @@ class Healthcheck(DictType):
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- - ``["CMD-SHELL", command]``: RUn command in the system's
+ - ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
@@ -23,15 +21,15 @@ class Healthcheck(DictType):
should be 0 or at least 1000000 (1 ms).
timeout (int): The time to wait before considering the check to
have hung. It should be 0 or at least 1000000 (1 ms).
- retries (integer): The number of consecutive failures needed to
+ retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
- start_period (integer): Start period for the container to
+ start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
"""
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
- if isinstance(test, six.string_types):
+ if isinstance(test, str):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
@@ -39,7 +37,7 @@ class Healthcheck(DictType):
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
- super(Healthcheck, self).__init__({
+ super().__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
@@ -53,6 +51,8 @@ class Healthcheck(DictType):
@test.setter
def test(self, value):
+ if isinstance(value, str):
+ value = ["CMD-SHELL", value]
self['Test'] = value
@property
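With the setter coercion above, both the constructor and later assignment
accept a plain string; all durations are nanoseconds (a sketch):

    >>> from docker.types import Healthcheck
    >>> hc = Healthcheck(
    ...     test='curl -f http://localhost/ || exit 1',
    ...     interval=30 * 1000000000, timeout=5 * 1000000000, retries=3)
    >>> hc.test = 'wget -q -O- http://localhost/'  # coerced to CMD-SHELL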
diff --git a/docker/types/networks.py b/docker/types/networks.py
index 1c7b2c9..1370dc1 100644
--- a/docker/types/networks.py
+++ b/docker/types/networks.py
@@ -4,7 +4,7 @@ from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
- ipv6_address=None, link_local_ips=None):
+ ipv6_address=None, link_local_ips=None, driver_opt=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
@@ -33,6 +33,15 @@ class EndpointConfig(dict):
if ipam_config:
self['IPAMConfig'] = ipam_config
+ if driver_opt:
+ if version_lt(version, '1.32'):
+ raise errors.InvalidVersion(
+ 'DriverOpts is not supported for API version < 1.32'
+ )
+ if not isinstance(driver_opt, dict):
+ raise TypeError('driver_opt must be a dictionary')
+ self['DriverOpts'] = driver_opt
+
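A minimal sketch of the new option, constructing the endpoint config
directly (the option key is hypothetical and driver-specific):

    >>> from docker.types import EndpointConfig
    >>> cfg = EndpointConfig(
    ...     version='1.32', driver_opt={'some.driver.option': 'value'})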
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):
diff --git a/docker/types/services.py b/docker/types/services.py
index 31f4750..fe7cc26 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import (
@@ -26,8 +24,8 @@ class TaskTemplate(dict):
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
- networks (:py:class:`list`): List of network names or IDs to attach
- the containers to.
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`NetworkAttachmentConfig` to attach the service to.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
"""
@@ -110,16 +108,23 @@ class ContainerSpec(dict):
privileges (Privileges): Security options for the service's containers.
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
+ init (boolean): Run an init inside the container that forwards signals
+ and reaps processes.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to the
+ default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
+ the default set for the container.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
- privileges=None, isolation=None):
+ privileges=None, isolation=None, init=None, cap_add=None,
+ cap_drop=None):
self['Image'] = image
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
self['Command'] = command
self['Args'] = args
@@ -149,7 +154,7 @@ class ContainerSpec(dict):
if mounts is not None:
parsed_mounts = []
for mount in mounts:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
@@ -183,6 +188,21 @@ class ContainerSpec(dict):
if isolation is not None:
self['Isolation'] = isolation
+ if init is not None:
+ self['Init'] = init
+
+ if cap_add is not None:
+ if not isinstance(cap_add, list):
+ raise TypeError('cap_add must be a list')
+
+ self['CapabilityAdd'] = cap_add
+
+ if cap_drop is not None:
+ if not isinstance(cap_drop, list):
+ raise TypeError('cap_drop must be a list')
+
+ self['CapabilityDrop'] = cap_drop
+
class Mount(dict):
"""
@@ -219,7 +239,7 @@ class Mount(dict):
self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
- 'Unsupported mount type: "{}"'.format(type)
+ f'Unsupported mount type: "{type}"'
)
self['Type'] = type
self['ReadOnly'] = read_only
@@ -255,7 +275,7 @@ class Mount(dict):
elif type == 'tmpfs':
tmpfs_opts = {}
if tmpfs_mode:
- if not isinstance(tmpfs_mode, six.integer_types):
+ if not isinstance(tmpfs_mode, int):
raise errors.InvalidArgument(
'tmpfs_mode must be an integer'
)
@@ -275,7 +295,7 @@ class Mount(dict):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
- 'Invalid mount format "{0}"'.format(string)
+ f'Invalid mount format "{string}"'
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
@@ -342,7 +362,7 @@ def _convert_generic_resources_dict(generic_resources):
' (found {})'.format(type(generic_resources))
)
resources = []
- for kind, value in six.iteritems(generic_resources):
+ for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
@@ -368,10 +388,11 @@ class UpdateConfig(dict):
parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0.
- delay (int): Amount of time between updates.
+ delay (int): Amount of time between updates, in nanoseconds.
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
- ``continue`` and ``pause``. Default: ``continue``
+ ``continue``, ``pause``, as well as ``rollback`` since API v1.28.
+ Default: ``continue``
monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
@@ -385,9 +406,9 @@ class UpdateConfig(dict):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
- if failure_action not in ('pause', 'continue'):
+ if failure_action not in ('pause', 'continue', 'rollback'):
raise errors.InvalidArgument(
- 'failure_action must be either `pause` or `continue`.'
+ 'failure_action must be one of `pause`, `continue`, `rollback`'
)
self['FailureAction'] = failure_action
@@ -413,7 +434,31 @@ class UpdateConfig(dict):
self['Order'] = order
-class RestartConditionTypesEnum(object):
+class RollbackConfig(UpdateConfig):
+ """
+    Used to specify the way container rollbacks should be performed by a
+    service.
+
+ Args:
+ parallelism (int): Maximum number of tasks to be rolled back in one
+ iteration (0 means unlimited parallelism). Default: 0
+ delay (int): Amount of time between rollbacks, in nanoseconds.
+ failure_action (string): Action to take if a rolled back task fails to
+ run, or stops running during the rollback. Acceptable values are
+ ``continue``, ``pause`` or ``rollback``.
+ Default: ``continue``
+ monitor (int): Amount of time to monitor each rolled back task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ a rollback before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+        order (string): Specifies the order of operations when rolling back
+            a task. Either ``start_first`` or ``stop_first`` is accepted.
+ """
+ pass
+
+
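A short sketch, passing the config through the model API documented
earlier; durations are nanoseconds:

    >>> from docker.types import RollbackConfig
    >>> rb = RollbackConfig(parallelism=1, delay=5 * 1000000000,
    ...                     order='stop_first')
    >>> client.services.create('busybox', 'true', name='svc',
    ...                        rollback_config=rb)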
+class RestartConditionTypesEnum:
_values = (
'none',
'on-failure',
@@ -444,7 +489,7 @@ class RestartPolicy(dict):
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
- 'Invalid RestartPolicy condition {0}'.format(condition)
+ f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
@@ -503,7 +548,7 @@ def convert_service_ports(ports):
)
result = []
- for k, v in six.iteritems(ports):
+ for k, v in ports.items():
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
@@ -623,18 +668,28 @@ class Placement(dict):
Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args:
- constraints (:py:class:`list`): A list of constraints
- preferences (:py:class:`list`): Preferences provide a way to make
- the scheduler aware of factors such as topology. They are
- provided in order from highest to lowest precedence.
- platforms (:py:class:`list`): A list of platforms expressed as
- ``(arch, os)`` tuples
- """
- def __init__(self, constraints=None, preferences=None, platforms=None):
+ constraints (:py:class:`list` of str): A list of constraints
+ preferences (:py:class:`list` of tuple): Preferences provide a way
+ to make the scheduler aware of factors such as topology. They
+ are provided in order from highest to lowest precedence and
+ are expressed as ``(strategy, descriptor)`` tuples. See
+ :py:class:`PlacementPreference` for details.
+ maxreplicas (int): Maximum number of replicas per node
+ platforms (:py:class:`list` of tuple): A list of platforms
+ expressed as ``(arch, os)`` tuples
+ """
+ def __init__(self, constraints=None, preferences=None, platforms=None,
+ maxreplicas=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
- self['Preferences'] = preferences
+ self['Preferences'] = []
+ for pref in preferences:
+ if isinstance(pref, tuple):
+ pref = PlacementPreference(*pref)
+ self['Preferences'].append(pref)
+ if maxreplicas is not None:
+ self['MaxReplicas'] = maxreplicas
if platforms:
self['Platforms'] = []
for plat in platforms:
@@ -643,6 +698,27 @@ class Placement(dict):
})
+class PlacementPreference(dict):
+ """
+ Placement preference to be used as an element in the list of
+ preferences for :py:class:`Placement` objects.
+
+ Args:
+ strategy (string): The placement strategy to implement. Currently,
+ the only supported strategy is ``spread``.
+ descriptor (string): A label descriptor. For the spread strategy,
+ the scheduler will try to spread tasks evenly over groups of
+ nodes identified by this label.
+ """
+ def __init__(self, strategy, descriptor):
+ if strategy != 'spread':
+ raise errors.InvalidArgument(
+ 'PlacementPreference strategy value is invalid ({}):'
+ ' must be "spread".'.format(strategy)
+ )
+ self['Spread'] = {'SpreadDescriptor': descriptor}
+
+
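A combined sketch of the placement types, with tuple preferences coerced as
above (label names are illustrative):

    >>> from docker.types import Placement
    >>> placement = Placement(
    ...     constraints=['node.role==worker'],
    ...     preferences=[('spread', 'node.labels.datacenter')],
    ...     maxreplicas=2)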
class DNSConfig(dict):
"""
Specification for DNS related configurations in resolver configuration
@@ -662,7 +738,7 @@ class DNSConfig(dict):
class Privileges(dict):
- """
+ r"""
Security options for a service's containers.
Part of a :py:class:`ContainerSpec` definition.
@@ -713,3 +789,21 @@ class Privileges(dict):
if len(selinux_context) > 0:
self['SELinuxContext'] = selinux_context
+
+
+class NetworkAttachmentConfig(dict):
+ """
+ Network attachment options for a service.
+
+ Args:
+ target (str): The target network for attachment.
+ Can be a network name or ID.
+ aliases (:py:class:`list`): A list of discoverable alternate names
+ for the service.
+ options (:py:class:`dict`): Driver attachment options for the
+ network target.
+ """
+ def __init__(self, target, aliases=None, options=None):
+ self['Target'] = target
+ self['Aliases'] = aliases
+ self['DriverOpts'] = options
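A brief sketch, attaching a service to a named network with an alias
(network and service names are illustrative):

    >>> from docker.types import NetworkAttachmentConfig
    >>> net = NetworkAttachmentConfig('backend', aliases=['db'])
    >>> client.services.create('redis', name='cache', networks=[net])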
diff --git a/docker/utils/build.py b/docker/utils/build.py
index 4fa5751..ac06043 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -4,8 +4,6 @@ import re
import tarfile
import tempfile
-import six
-
from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
@@ -69,7 +67,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
- extra_names = set(e[0] for e in extra_files)
+ extra_names = {e[0] for e in extra_files}
for path in files:
if path in extra_names:
# Extra files override context files with the same name
@@ -95,9 +93,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
- except IOError:
- raise IOError(
- 'Can not read file in context: {}'.format(full_path)
+ except OSError:
+ raise OSError(
+ f'Can not read file in context: {full_path}'
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
@@ -105,8 +103,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
for name, contents in extra_files:
info = tarfile.TarInfo(name)
- info.size = len(contents)
- t.addfile(info, io.BytesIO(contents.encode('utf-8')))
+ contents_encoded = contents.encode('utf-8')
+ info.size = len(contents_encoded)
+ t.addfile(info, io.BytesIO(contents_encoded))
t.close()
fileobj.seek(0)
@@ -118,12 +117,8 @@ def mkbuildcontext(dockerfile):
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
- if six.PY3:
- raise TypeError('Please use io.BytesIO to create in-memory '
- 'Dockerfiles with Python 3')
- else:
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
@@ -153,7 +148,7 @@ def walk(root, patterns, default=True):
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
-class PatternMatcher(object):
+class PatternMatcher:
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
@@ -211,13 +206,12 @@ class PatternMatcher(object):
break
if skip:
continue
- for sub in rec_walk(cur):
- yield sub
+ yield from rec_walk(cur)
return rec_walk(root)
-class Pattern(object):
+class Pattern:
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
diff --git a/docker/utils/config.py b/docker/utils/config.py
index 82a0e2a..8e24959 100644
--- a/docker/utils/config.py
+++ b/docker/utils/config.py
@@ -18,11 +18,11 @@ def find_config_file(config_path=None):
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
- log.debug("Trying paths: {0}".format(repr(paths)))
+ log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
- log.debug("Found file at path: {0}".format(path))
+ log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
@@ -57,7 +57,7 @@ def load_general_config(config_path=None):
try:
with open(config_file) as f:
return json.load(f)
- except (IOError, ValueError) as e:
+ except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index c975d4b..cf1baf4 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -27,7 +27,7 @@ def minimum_version(version):
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
- '{0} is not available for version < {1}'.format(
+ '{} is not available for version < {}'.format(
f.__name__, version
)
)
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
index cc940a2..90e9f60 100644
--- a/docker/utils/fnmatch.py
+++ b/docker/utils/fnmatch.py
@@ -108,7 +108,7 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
+ res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
index addffdf..f384175 100644
--- a/docker/utils/json_stream.py
+++ b/docker/utils/json_stream.py
@@ -1,11 +1,6 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
import json
import json.decoder
-import six
-
from ..errors import StreamParseError
@@ -20,7 +15,7 @@ def stream_as_text(stream):
instead of byte streams.
"""
for data in stream:
- if not isinstance(data, six.text_type):
+ if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
@@ -46,8 +41,8 @@ def json_stream(stream):
return split_buffer(stream, json_splitter, json_decoder.decode)
-def line_splitter(buffer, separator=u'\n'):
- index = buffer.find(six.text_type(separator))
+def line_splitter(buffer, separator='\n'):
+ index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
@@ -61,7 +56,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
of the input.
"""
splitter = splitter or line_splitter
- buffered = six.text_type('')
+ buffered = ''
for data in stream_as_text(stream):
buffered += data
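
For context on these helpers: split_buffer accumulates text chunks and
repeatedly applies the splitter, so json_stream can reassemble JSON documents
that arrive split across reads. An illustrative sketch of the intended
behavior:

    from docker.utils.json_stream import json_stream

    chunks = [b'{"stream": "Step 1"}', b'{"str', b'eam": "Step 2"}']
    assert list(json_stream(iter(chunks))) == [
        {'stream': 'Step 1'},
        {'stream': 'Step 2'},
    ]
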
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index bf7d697..e813936 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -3,11 +3,11 @@ import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
- "((?P<host>[a-fA-F\d.:]+):)?" # Address
- "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
- "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
- "(?P<proto>/(udp|tcp))?" # Protocol
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
@@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
if not end:
return [start + proto]
if randomly_available_port:
- return ['{}-{}'.format(start, end) + proto]
+ return [f'{start}-{end}' + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
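
The regex changes above add two capabilities: bracketed IPv6 host addresses
and the sctp protocol. A quick sketch against the compiled pattern:

    from docker.utils.ports import PORT_SPEC

    m = PORT_SPEC.match('[::1]:8080:80/sctp')
    assert m is not None
    assert m.group('host') == '::1'
    assert m.group('ext') == '8080'
    assert m.group('int') == '80'
    assert m.group('proto') == '/sctp'
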
diff --git a/docker/utils/proxy.py b/docker/utils/proxy.py
new file mode 100644
index 0000000..49e98ed
--- /dev/null
+++ b/docker/utils/proxy.py
@@ -0,0 +1,73 @@
+from .utils import format_environment
+
+
+class ProxyConfig(dict):
+ '''
+ Hold the client's proxy configuration
+ '''
+ @property
+ def http(self):
+ return self.get('http')
+
+ @property
+ def https(self):
+ return self.get('https')
+
+ @property
+ def ftp(self):
+ return self.get('ftp')
+
+ @property
+ def no_proxy(self):
+ return self.get('no_proxy')
+
+ @staticmethod
+ def from_dict(config):
+ '''
+ Instantiate a new ProxyConfig from a dictionary that represents a
+ client configuration, as described in `the documentation`_.
+
+ .. _the documentation:
+ https://docs.docker.com/network/proxy/#configure-the-docker-client
+ '''
+ return ProxyConfig(
+ http=config.get('httpProxy'),
+ https=config.get('httpsProxy'),
+ ftp=config.get('ftpProxy'),
+ no_proxy=config.get('noProxy'),
+ )
+
+ def get_environment(self):
+ '''
+ Return a dictionary representing the environment variables used to
+ set the proxy settings.
+ '''
+ env = {}
+ if self.http:
+ env['http_proxy'] = env['HTTP_PROXY'] = self.http
+ if self.https:
+ env['https_proxy'] = env['HTTPS_PROXY'] = self.https
+ if self.ftp:
+ env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
+ if self.no_proxy:
+ env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
+ return env
+
+ def inject_proxy_environment(self, environment):
+ '''
+ Given a list of strings representing environment variables, prepend the
+ environment variables corresponding to the proxy settings.
+ '''
+ if not self:
+ return environment
+
+ proxy_env = format_environment(self.get_environment())
+ if not environment:
+ return proxy_env
+ # It is important to prepend our variables, because we want the
+ # variables defined in "environment" to take precedence.
+ return proxy_env + environment
+
+ def __str__(self):
+ return 'ProxyConfig(http={}, https={}, ftp={}, no_proxy={})'.format(
+ self.http, self.https, self.ftp, self.no_proxy)
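
Taken together, the new module lets per-client proxy settings flow into
container and build environments. A short usage sketch with hypothetical
values:

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig.from_dict({
        'httpProxy': 'http://proxy.example:3128',
        'noProxy': 'localhost,127.0.0.1',
    })
    env = cfg.inject_proxy_environment(['FOO=bar'])
    # FOO=bar stays last, so user-supplied variables win on conflict:
    assert env[-1] == 'FOO=bar'
    assert 'http_proxy=http://proxy.example:3128' in env
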
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 7b96d4f..4a2076e 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -4,14 +4,16 @@ import select
import socket as pysocket
import struct
-import six
-
try:
from ..transport import NpipeSocket
except ImportError:
NpipeSocket = type(None)
+STDOUT = 1
+STDERR = 2
+
+
class SocketError(Exception):
pass
@@ -23,16 +25,16 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
- if six.PY3 and not isinstance(socket, NpipeSocket):
+ if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
- if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ if isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
- except EnvironmentError as e:
+ except OSError as e:
if e.errno not in recoverable_errors:
raise
@@ -42,7 +44,7 @@ def read_exactly(socket, n):
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
- data = six.binary_type()
+ data = bytes()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
@@ -51,28 +53,43 @@ def read_exactly(socket, n):
return data
-def next_frame_size(socket):
+def next_frame_header(socket):
"""
- Returns the size of the next frame of data waiting to be read from socket,
- according to the protocol defined here:
+ Returns the stream and size of the next frame of data waiting to be read
+    from the socket, according to the protocol defined here:
- https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
+ https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
- return -1
+ return (-1, -1)
+
+ stream, actual = struct.unpack('>BxxxL', data)
+ return (stream, actual)
- _, actual = struct.unpack('>BxxxL', data)
- return actual
+
+def frames_iter(socket, tty):
+ """
+    Return a generator of frames read from the socket. A frame is a tuple
+ the first item is the stream number and the second item is a chunk of data.
+
+ If the tty setting is enabled, the streams are multiplexed into the stdout
+ stream.
+ """
+ if tty:
+ return ((STDOUT, frame) for frame in frames_iter_tty(socket))
+ else:
+ return frames_iter_no_tty(socket)
-def frames_iter(socket):
+def frames_iter_no_tty(socket):
"""
- Returns a generator of frames read from socket
+ Returns a generator of data read from the socket when the tty setting is
+ not enabled.
"""
while True:
- n = next_frame_size(socket)
+ (stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
@@ -84,13 +101,13 @@ def frames_iter(socket):
# We have reached EOF
return
n -= data_length
- yield result
+ yield (stream, result)
-def socket_raw_iter(socket):
+def frames_iter_tty(socket):
"""
- Returns a generator of data read from the socket.
- This is used for non-multiplexed streams.
+ Return a generator of data read from the socket when the tty setting is
+ enabled.
"""
while True:
result = read(socket)
@@ -98,3 +115,53 @@ def socket_raw_iter(socket):
# We have reached EOF
return
yield result
+
+
+def consume_socket_output(frames, demux=False):
+ """
+ Iterate through frames read from the socket and return the result.
+
+ Args:
+
+ demux (bool):
+ If False, stdout and stderr are multiplexed, and the result is the
+ concatenation of all the frames. If True, the streams are
+ demultiplexed, and the result is a 2-tuple where each item is the
+ concatenation of frames belonging to the same stream.
+ """
+ if demux is False:
+    # If the streams are multiplexed, the generator returns byte strings
+    # that we just need to concatenate.
+ return bytes().join(frames)
+
+ # If the streams are demultiplexed, the generator yields tuples
+ # (stdout, stderr)
+ out = [None, None]
+ for frame in frames:
+ # It is guaranteed that for each frame, one and only one stream
+ # is not None.
+ assert frame != (None, None)
+ if frame[0] is not None:
+ if out[0] is None:
+ out[0] = frame[0]
+ else:
+ out[0] += frame[0]
+ else:
+ if out[1] is None:
+ out[1] = frame[1]
+ else:
+ out[1] += frame[1]
+ return tuple(out)
+
+
+def demux_adaptor(stream_id, data):
+ """
+ Utility to demultiplex stdout and stderr when reading frames from the
+ socket.
+ """
+ if stream_id == STDOUT:
+ return (data, None)
+ elif stream_id == STDERR:
+ return (None, data)
+ else:
+ raise ValueError(f'{stream_id} is not a valid stream')
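
For reference, the 8-byte header decoded by '>BxxxL' above is a stream-id
byte, three padding bytes, and a big-endian 32-bit payload length, per the
attach protocol. A self-contained sketch of the header layout and of
demultiplexing with the helpers defined in this file:

    import struct

    from docker.utils.socket import (
        STDERR, STDOUT, consume_socket_output, demux_adaptor
    )

    header = struct.pack('>BxxxL', STDERR, 5)  # stream 2, 5-byte payload
    assert struct.unpack('>BxxxL', header) == (STDERR, 5)

    frames = [demux_adaptor(STDOUT, b'out'), demux_adaptor(STDERR, b'err')]
    assert consume_socket_output(frames, demux=True) == (b'out', b'err')
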
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index fe3b9a5..f7c3dd7 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,31 +1,20 @@
import base64
+import json
import os
import os.path
-import json
import shlex
-from distutils.version import StrictVersion
+import string
from datetime import datetime
-
-import six
+from distutils.version import StrictVersion
from .. import errors
from .. import tls
+from ..constants import DEFAULT_HTTP_HOST
+from ..constants import DEFAULT_UNIX_SOCKET
+from ..constants import DEFAULT_NPIPE
+from ..constants import BYTE_UNITS
-if six.PY2:
- from urllib import splitnport
-else:
- from urllib.parse import splitnport
-
-DEFAULT_HTTP_HOST = "127.0.0.1"
-DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
-DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
-
-BYTE_UNITS = {
- 'b': 1,
- 'k': 1024,
- 'm': 1024 * 1024,
- 'g': 1024 * 1024 * 1024
-}
+from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
@@ -44,8 +33,7 @@ def create_ipam_config(*args, **kwargs):
def decode_json_header(header):
data = base64.b64decode(header)
- if six.PY3:
- data = data.decode('utf-8')
+ data = data.decode('utf-8')
return json.loads(data)
@@ -85,7 +73,7 @@ def _convert_port_binding(binding):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
- elif isinstance(binding[0], six.string_types):
+ elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
@@ -109,7 +97,7 @@ def _convert_port_binding(binding):
def convert_port_bindings(port_bindings):
result = {}
- for k, v in six.iteritems(port_bindings):
+ for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
@@ -126,7 +114,7 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
- if isinstance(k, six.binary_type):
+ if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
@@ -137,7 +125,7 @@ def convert_volume_binds(binds):
)
bind = v['bind']
- if isinstance(bind, six.binary_type):
+ if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
@@ -148,13 +136,13 @@ def convert_volume_binds(binds):
mode = 'rw'
result.append(
- six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+ f'{k}:{bind}:{mode}'
)
else:
- if isinstance(v, six.binary_type):
+ if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
- six.text_type('{0}:{1}:rw').format(k, v)
+ f'{k}:{v}:rw'
)
return result
@@ -171,7 +159,7 @@ def convert_tmpfs_mounts(tmpfs):
result = {}
for mount in tmpfs:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
@@ -196,7 +184,7 @@ def convert_service_networks(networks):
result = []
for n in networks:
- if isinstance(n, six.string_types):
+ if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
@@ -212,75 +200,93 @@ def parse_repository_tag(repo_name):
return repo_name, None
-# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
-# fd:// protocol unsupported (for obvious reasons)
-# Added support for http and https
-# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
- proto = "http+unix"
- port = None
path = ''
+ port = None
+ host = None
+ # Sensible defaults
if not addr and is_win32:
- addr = DEFAULT_NPIPE
-
+ return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
- if addr.startswith('http://'):
- addr = addr.replace('http://', 'tcp://')
- if addr.startswith('http+unix://'):
- addr = addr.replace('http+unix://', 'unix://')
- if addr == 'tcp://':
+ parsed_url = urlparse(addr)
+ proto = parsed_url.scheme
+ if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
+ # https://bugs.python.org/issue754016
+ parsed_url = urlparse('//' + addr, 'tcp')
+ proto = 'tcp'
+
+ if proto == 'fd':
+ raise errors.DockerException('fd protocol is not implemented')
+
+ # These protos are valid aliases for our library but not for the
+ # official spec
+ if proto == 'http' or proto == 'https':
+ tls = proto == 'https'
+ proto = 'tcp'
+ elif proto == 'http+unix':
+ proto = 'unix'
+
+ if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
- "Invalid bind address format: {0}".format(addr)
+ f"Invalid bind address protocol: {addr}"
)
- elif addr.startswith('unix://'):
- addr = addr[7:]
- elif addr.startswith('tcp://'):
- proto = 'http{0}'.format('s' if tls else '')
- addr = addr[6:]
- elif addr.startswith('https://'):
- proto = "https"
- addr = addr[8:]
- elif addr.startswith('npipe://'):
- proto = 'npipe'
- addr = addr[8:]
- elif addr.startswith('fd://'):
- raise errors.DockerException("fd protocol is not implemented")
- else:
- if "://" in addr:
- raise errors.DockerException(
- "Invalid bind address protocol: {0}".format(addr)
- )
- proto = "https" if tls else "http"
- if proto in ("http", "https"):
- address_parts = addr.split('/', 1)
- host = address_parts[0]
- if len(address_parts) == 2:
- path = '/' + address_parts[1]
- host, port = splitnport(host)
+ if proto == 'tcp' and not parsed_url.netloc:
+ # "tcp://" is exceptionally disallowed by convention;
+ # omitting a hostname for other protocols is fine
+ raise errors.DockerException(
+ f'Invalid bind address format: {addr}'
+ )
- if port is None:
- raise errors.DockerException(
- "Invalid port: {0}".format(addr)
- )
+ if any([
+ parsed_url.params, parsed_url.query, parsed_url.fragment,
+ parsed_url.password
+ ]):
+ raise errors.DockerException(
+ f'Invalid bind address format: {addr}'
+ )
+
+ if parsed_url.path and proto == 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: no path allowed for this protocol:'
+ ' {}'.format(addr)
+ )
+ else:
+ path = parsed_url.path
+ if proto == 'unix' and parsed_url.hostname is not None:
+ # For legacy reasons, we consider unix://path
+ # to be valid and equivalent to unix:///path
+ path = '/'.join((parsed_url.hostname, path))
+
+ if proto in ('tcp', 'ssh'):
+ # parsed_url.hostname strips brackets from IPv6 addresses,
+ # which can be problematic hence our use of splitnport() instead.
+ host, port = splitnport(parsed_url.netloc)
+ if port is None or port < 0:
+ if proto != 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: port is required:'
+ ' {}'.format(addr)
+ )
+ port = 22
if not host:
host = DEFAULT_HTTP_HOST
- else:
- host = addr
- if proto in ("http", "https") and port == -1:
- raise errors.DockerException(
- "Bind address needs a port: {0}".format(addr))
+ # Rewrite schemes to fit library internals (requests adapters)
+ if proto == 'tcp':
+ proto = 'http{}'.format('s' if tls else '')
+ elif proto == 'unix':
+ proto = 'http+unix'
- if proto == "http+unix" or proto == 'npipe':
- return "{0}://{1}".format(proto, host).rstrip('/')
- return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
+ if proto in ('http+unix', 'npipe'):
+ return f"{proto}://{path}".rstrip('/')
+ return f'{proto}://{host}:{port}{path}'.rstrip('/')
def parse_devices(devices):
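
A few inputs and the expected outputs under the rewritten logic above:

    from docker.utils.utils import parse_host

    assert parse_host('tcp://127.0.0.1:2375') == 'http://127.0.0.1:2375'
    assert parse_host('tcp://127.0.0.1:2376', tls=True) == \
        'https://127.0.0.1:2376'
    assert parse_host('unix:///var/run/docker.sock') == \
        'http+unix:///var/run/docker.sock'
    assert parse_host('ssh://example.com') == 'ssh://example.com:22'
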
@@ -289,9 +295,9 @@ def parse_devices(devices):
if isinstance(device, dict):
device_list.append(device)
continue
- if not isinstance(device, six.string_types):
+ if not isinstance(device, str):
raise errors.DockerException(
- 'Invalid device type {0}'.format(type(device))
+ f'Invalid device type {type(device)}'
)
device_mapping = device.split(':')
if device_mapping:
@@ -332,9 +338,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
params = {}
if host:
- params['base_url'] = (
- host.replace('tcp://', 'https://') if enable_tls else host
- )
+ params['base_url'] = host
if not enable_tls:
return params
@@ -361,12 +365,15 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
def convert_filters(filters):
result = {}
- for k, v in six.iteritems(filters):
+ for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
- result[k] = v
+ result[k] = [
+ str(item) if not isinstance(item, str) else item
+ for item in v
+ ]
return json.dumps(result)
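
With the coercion above, boolean and numeric filter values serialize
deterministically. Expected JSON under this logic:

    from docker.utils.utils import convert_filters

    assert convert_filters({'dangling': True}) == '{"dangling": ["true"]}'
    assert convert_filters({'exited': 0}) == '{"exited": ["0"]}'
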
@@ -377,7 +384,7 @@ def datetime_to_timestamp(dt):
def parse_bytes(s):
- if isinstance(s, six.integer_types + (float,)):
+ if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
@@ -398,10 +405,10 @@ def parse_bytes(s):
if suffix in units.keys() or suffix.isdigit():
try:
- digits = int(digits_part)
+ digits = float(digits_part)
except ValueError:
raise errors.DockerException(
- 'Failed converting the string value for memory ({0}) to'
+ 'Failed converting the string value for memory ({}) to'
' an integer.'.format(digits_part)
)
@@ -409,7 +416,7 @@ def parse_bytes(s):
s = int(digits * units[suffix])
else:
raise errors.DockerException(
- 'The specified value for memory ({0}) should specify the'
+ 'The specified value for memory ({}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
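
Parsing the digits as float (the result is still floored to an int by the
multiplication below) means fractional specs become legal. Illustrative
values, assuming the BYTE_UNITS mapping shown earlier in this diff:

    from docker.utils.utils import parse_bytes

    assert parse_bytes('128m') == 128 * 1024 * 1024
    assert parse_bytes('1.5g') == int(1.5 * 1024 ** 3)  # newly accepted
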
@@ -419,9 +426,9 @@ def parse_bytes(s):
def normalize_links(links):
if isinstance(links, dict):
- links = six.iteritems(links)
+ links = iter(links.items())
- return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
+ return [f'{k}:{v}' if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
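
The tweak above means a link without an alias serializes as the bare name
instead of 'name:None'. A quick sketch:

    from docker.utils.utils import normalize_links

    links = {'db': 'database', 'cache': None}
    assert normalize_links(links) == ['cache', 'db:database']
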
@@ -431,7 +438,7 @@ def parse_env_file(env_file):
"""
environment = {}
- with open(env_file, 'r') as f:
+ with open(env_file) as f:
for line in f:
if line[0] == '#':
@@ -447,15 +454,13 @@ def parse_env_file(env_file):
environment[k] = v
else:
raise errors.DockerException(
- 'Invalid line in environment file {0}:\n{1}'.format(
+ 'Invalid line in environment file {}:\n{}'.format(
env_file, line))
return environment
def split_command(command):
- if six.PY2 and not isinstance(command, six.binary_type):
- command = command.encode('utf-8')
return shlex.split(command)
@@ -463,22 +468,22 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
- if isinstance(value, six.binary_type):
+ if isinstance(value, bytes):
value = value.decode('utf-8')
- return u'{key}={value}'.format(key=key, value=value)
- return [format_env(*var) for var in six.iteritems(environment)]
+ return f'{key}={value}'
+ return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
- '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
]
return [
- '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
]
diff --git a/docker/version.py b/docker/version.py
index d451374..4259432 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "3.4.1"
-version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
+version = "5.0.3"
+version_info = tuple(int(d) for d in version.split("-")[0].split("."))
diff --git a/requirements.txt b/requirements.txt
index 7d0afbe..34a3083 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,18 +1,16 @@
appdirs>=1.4.3
asn1crypto>=0.22.0
backports.ssl-match-hostname>=3.5.0.1
-cffi>=1.10.0
-cryptography>=1.9
-docker-pycreds>=0.3.0
+cffi>=1.14.4
+cryptography>=3.2
enum34>=1.1.6
idna>=2.5
ipaddress>=1.0.18
packaging>=16.8
+paramiko>=2.4.2
pycparser>=2.17
-pyOpenSSL>=17.0.0
+pyOpenSSL>=18.0.0
pyparsing>=2.2.0
-pypiwin32>=219; sys_platform == 'win32' and python_version < '3.6'
-pypiwin32>=220; sys_platform == 'win32' and python_version >= '3.6'
-requests>=2.14.2
-six>=1.10.0
-websocket-client>=0.40.0
+requests>=2.25.0
+urllib3>=1.26.5
+websocket-client>=0.56.0
diff --git a/setup.py b/setup.py
index 57b2b5a..5b9945d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,22 @@
#!/usr/bin/env python
-from __future__ import print_function
import codecs
import os
-from setuptools import setup, find_packages
+from setuptools import find_packages
+from setuptools import setup
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.14.2, != 2.18.0',
- 'six >= 1.4.0',
'websocket-client >= 0.32.0',
- 'docker-pycreds >= 0.3.0'
+ 'requests >= 2.14.2, != 2.18.0',
]
extras_require = {
- ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
- # While not imported explicitly, the ipaddress module is required for
- # ssl_match_hostname to verify hosts match with certificates via
- # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
- ':python_version < "3.3"': 'ipaddress >= 1.0.16',
-
# win32 APIs if on Windows (required for npipe support)
- # Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
- # on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
- ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
- ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==220',
+ ':sys_platform == "win32"': 'pywin32==227',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
@@ -37,7 +26,11 @@ extras_require = {
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
- 'tls': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.2', 'idna>=2.0.0'],
+
+ # Only required when connecting using the ssh:// protocol
+ 'ssh': ['paramiko>=2.4.2'],
+
}
version = None
@@ -48,24 +41,27 @@ with open('./test-requirements.txt') as test_reqs_txt:
long_description = ''
-try:
- with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
- long_description = readme_rst.read()
-except IOError:
- # README.rst is only generated on release. Its absence should not prevent
- # setup.py from working properly.
- pass
+with codecs.open('./README.md', encoding='utf-8') as readme_md:
+ long_description = readme_md.read()
setup(
name="docker",
version=version,
description="A Python library for the Docker Engine API.",
long_description=long_description,
+ long_description_content_type='text/markdown',
url='https://github.com/docker/docker-py',
+ project_urls={
+ 'Documentation': 'https://docker-py.readthedocs.io',
+ 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501
+ 'Source': 'https://github.com/docker/docker-py',
+ 'Tracker': 'https://github.com/docker/docker-py/issues',
+ },
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
+ python_requires='>=3.6',
zip_safe=False,
test_suite='tests',
classifiers=[
@@ -74,16 +70,15 @@ setup(
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Topic :: Software Development',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
- maintainer='Joffrey F',
- maintainer_email='joffrey@docker.com',
+ maintainer='Ulysses Souza',
+ maintainer_email='ulysses.souza@docker.com',
)
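
Given the reworked extras_require above, the optional dependency sets are
opt-in at install time; the usual pip spellings would be:

    pip install docker          # core client
    pip install "docker[tls]"   # pinned pyOpenSSL/cryptography/idna
    pip install "docker[ssh]"   # paramiko for ssh:// connections
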
diff --git a/test-requirements.txt b/test-requirements.txt
index 3396b1c..585e3fd 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,7 @@
-coverage>=3.7.1
-flake8>=3.4.1
+setuptools>=54.1.1
+coverage>=4.5.2
+flake8>=3.6.0
mock>=1.0.1
-pytest>=2.9.1
-pytest-cov>=2.1.0
-pytest-timeout>=1.2.1
+pytest>=4.3.1
+pytest-cov>=2.6.1
+pytest-timeout>=1.3.3
diff --git a/tests/gpg-keys/ownertrust b/tests/gpg-keys/ownertrust
new file mode 100644
index 0000000..141ea57
--- /dev/null
+++ b/tests/gpg-keys/ownertrust
@@ -0,0 +1,3 @@
+# List of assigned trustvalues, created Wed 25 Apr 2018 01:28:17 PM PDT
+# (Use "gpg --import-ownertrust" to restore them)
+9781B87DAB042E6FD51388A5464ED987A7B21401:6:
diff --git a/tests/gpg-keys/secret b/tests/gpg-keys/secret
new file mode 100644
index 0000000..412294d
--- /dev/null
+++ b/tests/gpg-keys/secret
Binary files differ
diff --git a/tests/helpers.py b/tests/helpers.py
index b36d6d7..63cbe2e 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -2,14 +2,14 @@ import functools
import os
import os.path
import random
+import re
+import socket
import tarfile
import tempfile
import time
-import re
-import six
-import socket
import docker
+import paramiko
import pytest
@@ -53,7 +53,7 @@ def requires_api_version(version):
return pytest.mark.skipif(
docker.utils.version_lt(test_version, version),
- reason="API version is too low (< {0})".format(version)
+ reason=f"API version is too low (< {version})"
)
@@ -85,7 +85,7 @@ def wait_on_condition(condition, delay=0.1, timeout=40):
def random_name():
- return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+ return f'dockerpytest_{random.getrandbits(64):x}'
def force_leave_swarm(client):
@@ -104,11 +104,11 @@ def force_leave_swarm(client):
def swarm_listen_addr():
- return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
+ return f'0.0.0.0:{random.randrange(10000, 25000)}'
def assert_cat_socket_detached_with_keys(sock, inputs):
- if six.PY3 and hasattr(sock, '_sock'):
+ if hasattr(sock, '_sock'):
sock = sock._sock
for i in inputs:
@@ -118,10 +118,18 @@ def assert_cat_socket_detached_with_keys(sock, inputs):
# If we're using a Unix socket, the sock.send call will fail with a
# BrokenPipeError ; INET sockets will just stop receiving / sending data
# but will not raise an error
- if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
- with pytest.raises(socket.error):
+ if isinstance(sock, paramiko.Channel):
+ with pytest.raises(OSError):
sock.sendall(b'make sure the socket is closed\n')
else:
+ if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
+ # We do not want to use pytest.raises here because future versions
+ # of the daemon no longer cause this to raise an error.
+ try:
+ sock.sendall(b'make sure the socket is closed\n')
+ except OSError:
+ return
+
sock.sendall(b"make sure the socket is closed\n")
data = sock.recv(128)
# New in 18.06: error message is broadcast over the socket when reading
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
index baaf33e..ef48e12 100644
--- a/tests/integration/api_build_test.py
+++ b/tests/integration/api_build_test.py
@@ -4,15 +4,57 @@ import shutil
import tempfile
from docker import errors
+from docker.utils.proxy import ProxyConfig
import pytest
-import six
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
from ..helpers import random_name, requires_api_version, requires_experimental
class BuildTest(BaseAPIIntegrationTest):
+ def test_build_with_proxy(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=a"',
+ 'RUN env | grep "ftp_proxy=a"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(fileobj=script, decode=True)
+
+ def test_build_with_proxy_and_buildargs(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=XXX"',
+ 'RUN env | grep "ftp_proxy=xxx"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(
+ fileobj=script,
+ decode=True,
+ buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'}
+ )
+
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
@@ -28,9 +70,8 @@ class BuildTest(BaseAPIIntegrationTest):
assert len(logs) > 0
def test_build_from_stringio(self):
- if six.PY3:
- return
- script = io.StringIO(six.text_type('\n').join([
+        return  # StringIO Dockerfiles are unsupported on Python 3; skip
+ script = io.StringIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
@@ -40,8 +81,7 @@ class BuildTest(BaseAPIIntegrationTest):
stream = self.client.build(fileobj=script)
logs = ''
for chunk in stream:
- if six.PY3:
- chunk = chunk.decode('utf-8')
+ chunk = chunk.decode('utf-8')
logs += chunk
assert logs != ''
@@ -92,8 +132,7 @@ class BuildTest(BaseAPIIntegrationTest):
self.client.wait(c)
logs = self.client.logs(c)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = logs.decode('utf-8')
assert sorted(list(filter(None, logs.split('\n')))) == sorted([
'/test/#file.txt',
@@ -234,7 +273,7 @@ class BuildTest(BaseAPIIntegrationTest):
# Set up pingable endpoint on custom network
network = self.client.create_network(random_name())['Id']
self.tmp_networks.append(network)
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
self.client.connect_container_to_network(
@@ -296,10 +335,8 @@ class BuildTest(BaseAPIIntegrationTest):
assert self.client.inspect_image(img_name)
ctnr = self.run_container(img_name, 'cat /hosts-file')
- self.tmp_containers.append(ctnr)
logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = logs.decode('utf-8')
assert '127.0.0.1\textrahost.local.test' in logs
assert '127.0.0.1\thello.world.test' in logs
@@ -334,7 +371,7 @@ class BuildTest(BaseAPIIntegrationTest):
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
script = io.BytesIO(b'\n'.join([
b'FROM busybox',
- 'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
+ f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
]))
stream = self.client.build(
@@ -398,15 +435,17 @@ class BuildTest(BaseAPIIntegrationTest):
@requires_api_version('1.32')
@requires_experimental(until=None)
def test_build_invalid_platform(self):
- script = io.BytesIO('FROM busybox\n'.encode('ascii'))
+ script = io.BytesIO(b'FROM busybox\n')
with pytest.raises(errors.APIError) as excinfo:
stream = self.client.build(fileobj=script, platform='foobar')
for _ in stream:
pass
- assert excinfo.value.status_code == 400
- assert 'invalid platform' in excinfo.exconly()
+        # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
def test_build_out_of_context_dockerfile(self):
base_dir = tempfile.mkdtemp()
@@ -540,6 +579,11 @@ class BuildTest(BaseAPIIntegrationTest):
) == sorted(lsdata)
@requires_api_version('1.31')
+ @pytest.mark.xfail(
+ True,
+ reason='Currently fails on 18.09: '
+ 'https://github.com/moby/moby/issues/37920'
+ )
def test_prune_builds(self):
prune_result = self.client.prune_builds()
assert 'SpaceReclaimed' in prune_result
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
index 905e064..d1622fa 100644
--- a/tests/integration/api_client_test.py
+++ b/tests/integration/api_client_test.py
@@ -47,7 +47,7 @@ class ConnectionTimeoutTest(unittest.TestCase):
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
- except:
+ except: # noqa: E722
pass
end = time.time()
assert res is None
@@ -72,6 +72,6 @@ class UnixconnTest(unittest.TestCase):
client.close()
del client
- assert len(w) == 0, "No warnings produced: {0}".format(
+ assert len(w) == 0, "No warnings produced: {}".format(
w[0].message
)
diff --git a/tests/integration/api_config_test.py b/tests/integration/api_config_test.py
index 0ffd767..82cb516 100644
--- a/tests/integration/api_config_test.py
+++ b/tests/integration/api_config_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import docker
import pytest
@@ -31,7 +29,7 @@ class ConfigAPITest(BaseAPIIntegrationTest):
def test_create_config_unicode_data(self):
config_id = self.client.create_config(
- 'favorite_character', u'いざよいさくや'
+ 'favorite_character', 'いざよいさくや'
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
@@ -70,3 +68,16 @@ class ConfigAPITest(BaseAPIIntegrationTest):
data = self.client.configs(filters={'name': ['favorite_character']})
assert len(data) == 1
assert data[0]['ID'] == config_id['ID']
+
+ @requires_api_version('1.37')
+ def test_create_config_with_templating(self):
+ config_id = self.client.create_config(
+ 'favorite_character', 'sakuya izayoi',
+            templating={'name': 'golang'}
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+ assert 'Templating' in data['Spec']
+ assert data['Spec']['Templating']['Name'] == 'golang'
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
index ff70148..9da2cfb 100644
--- a/tests/integration/api_container_test.py
+++ b/tests/integration/api_container_test.py
@@ -5,28 +5,26 @@ import tempfile
import threading
from datetime import datetime
-import docker
-from docker.constants import IS_WINDOWS_PLATFORM
-from docker.utils.socket import next_frame_size
-from docker.utils.socket import read_exactly
-
import pytest
-
import requests
-import six
-from .base import BUSYBOX, BaseAPIIntegrationTest
+import docker
from .. import helpers
-from ..helpers import (
- requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
-)
+from ..helpers import assert_cat_socket_detached_with_keys
+from ..helpers import ctrl_with
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
+from .base import TEST_IMG
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.utils.socket import next_frame_header
+from docker.utils.socket import read_exactly
class ListContainersTest(BaseAPIIntegrationTest):
def test_list_containers(self):
res0 = self.client.containers(all=True)
size = len(res0)
- res1 = self.client.create_container(BUSYBOX, 'true')
+ res1 = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res1
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
@@ -36,22 +34,22 @@ class ListContainersTest(BaseAPIIntegrationTest):
assert len(retrieved) == 1
retrieved = retrieved[0]
assert 'Command' in retrieved
- assert retrieved['Command'] == six.text_type('true')
+ assert retrieved['Command'] == 'true'
assert 'Image' in retrieved
- assert re.search(r'busybox:.*', retrieved['Image'])
+ assert re.search(r'alpine:.*', retrieved['Image'])
assert 'Status' in retrieved
class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
def test_create_with_host_pid_mode(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true', host_config=self.client.create_host_config(
+ TEST_IMG, 'true', host_config=self.client.create_host_config(
pid_mode='host', network_mode='none'
)
)
@@ -66,7 +64,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_links(self):
res0 = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
@@ -76,7 +74,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container1_id)
res1 = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
@@ -95,7 +93,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
link_env_prefix2 = link_alias2.upper()
res2 = self.client.create_container(
- BUSYBOX, 'env', host_config=self.client.create_host_config(
+ TEST_IMG, 'env', host_config=self.client.create_host_config(
links={link_path1: link_alias1, link_path2: link_alias2},
network_mode='bridge'
)
@@ -105,17 +103,15 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container3_id)
assert self.client.wait(container3_id)['StatusCode'] == 0
- logs = self.client.logs(container3_id)
- if six.PY3:
- logs = logs.decode('utf-8')
- assert '{0}_NAME='.format(link_env_prefix1) in logs
- assert '{0}_ENV_FOO=1'.format(link_env_prefix1) in logs
- assert '{0}_NAME='.format(link_env_prefix2) in logs
- assert '{0}_ENV_FOO=1'.format(link_env_prefix2) in logs
+ logs = self.client.logs(container3_id).decode('utf-8')
+ assert f'{link_env_prefix1}_NAME=' in logs
+ assert f'{link_env_prefix1}_ENV_FOO=1' in logs
+ assert f'{link_env_prefix2}_NAME=' in logs
+ assert f'{link_env_prefix2}_ENV_FOO=1' in logs
def test_create_with_restart_policy(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '2'],
+ TEST_IMG, ['sleep', '2'],
host_config=self.client.create_host_config(
restart_policy={"Name": "always", "MaximumRetryCount": 0},
network_mode='none'
@@ -134,21 +130,21 @@ class CreateContainerTest(BaseAPIIntegrationTest):
vol_names = ['foobar_vol0', 'foobar_vol1']
res0 = self.client.create_container(
- BUSYBOX, 'true', name=vol_names[0]
+ TEST_IMG, 'true', name=vol_names[0]
)
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
- BUSYBOX, 'true', name=vol_names[1]
+ TEST_IMG, 'true', name=vol_names[1]
)
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
res = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True,
+ TEST_IMG, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
@@ -162,7 +158,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def create_container_readonly_fs(self):
ctnr = self.client.create_container(
- BUSYBOX, ['mkdir', '/shrine'],
+ TEST_IMG, ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
read_only=True, network_mode='none'
)
@@ -174,7 +170,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
assert res != 0
def create_container_with_name(self):
- res = self.client.create_container(BUSYBOX, 'true', name='foobar')
+ res = self.client.create_container(TEST_IMG, 'true', name='foobar')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
@@ -183,7 +179,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def create_container_privileged(self):
res = self.client.create_container(
- BUSYBOX, 'true', host_config=self.client.create_host_config(
+ TEST_IMG, 'true', host_config=self.client.create_host_config(
privileged=True, network_mode='none'
)
)
@@ -209,7 +205,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected)
+ TEST_IMG, ['sleep', '60'], mac_address=mac_address_expected)
id = container['Id']
@@ -221,23 +217,21 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_group_id_ints(self):
container = self.client.create_container(
- BUSYBOX, 'id -G',
+ TEST_IMG, 'id -G',
host_config=self.client.create_host_config(group_add=[1000, 1001])
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
groups = logs.strip().split(' ')
assert '1000' in groups
assert '1001' in groups
def test_group_id_strings(self):
container = self.client.create_container(
- BUSYBOX, 'id -G', host_config=self.client.create_host_config(
+ TEST_IMG, 'id -G', host_config=self.client.create_host_config(
group_add=['1000', '1001']
)
)
@@ -245,9 +239,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.wait(container)
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
groups = logs.strip().split(' ')
assert '1000' in groups
@@ -260,7 +252,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
container = self.client.create_container(
- BUSYBOX, ['true'],
+ TEST_IMG, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
@@ -274,21 +266,24 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_invalid_log_driver_raises_exception(self):
log_config = docker.types.LogConfig(
- type='asdf-nope',
+ type='asdf',
config={}
)
- expected_msg = "logger: no log driver named 'asdf-nope' is registered"
+ expected_msgs = [
+ "logger: no log driver named 'asdf' is registered",
+ "error looking up logging plugin asdf: plugin \"asdf\" not found",
+ ]
with pytest.raises(docker.errors.APIError) as excinfo:
# raises an internal server error 500
container = self.client.create_container(
- BUSYBOX, ['true'], host_config=self.client.create_host_config(
+ TEST_IMG, ['true'], host_config=self.client.create_host_config(
log_config=log_config
)
)
self.client.start(container)
- assert excinfo.value.explanation == expected_msg
+ assert excinfo.value.explanation in expected_msgs
def test_valid_no_log_driver_specified(self):
log_config = docker.types.LogConfig(
@@ -297,7 +292,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
container = self.client.create_container(
- BUSYBOX, ['true'],
+ TEST_IMG, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
@@ -316,7 +311,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
container = self.client.create_container(
- BUSYBOX, ['true'],
+ TEST_IMG, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
@@ -330,7 +325,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_memory_constraints_with_str(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true',
+ TEST_IMG, 'true',
host_config=self.client.create_host_config(
memswap_limit='1G',
mem_limit='700M'
@@ -348,7 +343,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_memory_constraints_with_int(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true',
+ TEST_IMG, 'true',
host_config=self.client.create_host_config(mem_swappiness=40)
)
assert 'Id' in ctnr
@@ -362,16 +357,15 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
- BUSYBOX,
+ TEST_IMG,
['echo'],
environment={'Foo': None, 'Other': 'one', 'Blank': ''},
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container['Id'])
- assert (
- sorted(config['Config']['Env']) ==
- sorted(['Foo', 'Other=one', 'Blank='])
- )
+ assert 'Foo' in config['Config']['Env']
+ assert 'Other=one' in config['Config']['Env']
+ assert 'Blank=' in config['Config']['Env']
@requires_api_version('1.22')
def test_create_with_tmpfs(self):
@@ -380,7 +374,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
}
container = self.client.create_container(
- BUSYBOX,
+ TEST_IMG,
['echo'],
host_config=self.client.create_host_config(
tmpfs=tmpfs))
@@ -392,7 +386,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
@requires_api_version('1.24')
def test_create_with_isolation(self):
container = self.client.create_container(
- BUSYBOX, ['echo'], host_config=self.client.create_host_config(
+ TEST_IMG, ['echo'], host_config=self.client.create_host_config(
isolation='default'
)
)
@@ -406,7 +400,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
auto_remove=True
)
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], host_config=host_config
+ TEST_IMG, ['echo', 'test'], host_config=host_config
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
@@ -415,7 +409,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_with_stop_timeout(self):
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], stop_timeout=25
+ TEST_IMG, ['echo', 'test'], stop_timeout=25
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
@@ -428,7 +422,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
storage_opt={'size': '120G'}
)
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], host_config=host_config
+ TEST_IMG, ['echo', 'test'], host_config=host_config
)
self.tmp_containers.append(container)
config = self.client.inspect_container(container)
@@ -439,7 +433,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_with_init(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true',
+ TEST_IMG, 'true',
host_config=self.client.create_host_config(
init=True
)
@@ -448,25 +442,12 @@ class CreateContainerTest(BaseAPIIntegrationTest):
config = self.client.inspect_container(ctnr)
assert config['HostConfig']['Init'] is True
- @pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
- @requires_api_version('1.25')
- def test_create_with_init_path(self):
- ctnr = self.client.create_container(
- BUSYBOX, 'true',
- host_config=self.client.create_host_config(
- init_path="/usr/libexec/docker-init"
- )
- )
- self.tmp_containers.append(ctnr['Id'])
- config = self.client.inspect_container(ctnr)
- assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
-
@requires_api_version('1.24')
@pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu.rt_runtime_us'),
reason='CONFIG_RT_GROUP_SCHED isn\'t enabled')
def test_create_with_cpu_rt_options(self):
ctnr = self.client.create_container(
- BUSYBOX, 'true', host_config=self.client.create_host_config(
+ TEST_IMG, 'true', host_config=self.client.create_host_config(
cpu_rt_period=1000, cpu_rt_runtime=500
)
)
@@ -479,7 +460,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_create_with_device_cgroup_rules(self):
rule = 'c 7:128 rwm'
ctnr = self.client.create_container(
- BUSYBOX, 'cat /sys/fs/cgroup/devices/devices.list',
+ TEST_IMG, 'cat /sys/fs/cgroup/devices/devices.list',
host_config=self.client.create_host_config(
device_cgroup_rules=[rule]
)
@@ -490,13 +471,23 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(ctnr)
assert rule in self.client.logs(ctnr).decode('utf-8')
+ def test_create_with_uts_mode(self):
+ container = self.client.create_container(
+ TEST_IMG, ['echo'], host_config=self.client.create_host_config(
+ uts_mode='host'
+ )
+ )
+ self.tmp_containers.append(container)
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['UTSMode'] == 'host'
+
@pytest.mark.xfail(
IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
)
class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
- super(VolumeBindTest, self).setUp()
+ super().setUp()
self.mount_dest = '/mnt'
@@ -506,7 +497,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
self.run_with_volume(
False,
- BUSYBOX,
+ TEST_IMG,
['touch', os.path.join(self.mount_dest, self.filename)],
)
@@ -514,13 +505,10 @@ class VolumeBindTest(BaseAPIIntegrationTest):
container = self.run_with_volume(
False,
- BUSYBOX,
+ TEST_IMG,
['ls', self.mount_dest],
)
- logs = self.client.logs(container)
-
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@@ -528,18 +516,16 @@ class VolumeBindTest(BaseAPIIntegrationTest):
def test_create_with_binds_ro(self):
self.run_with_volume(
False,
- BUSYBOX,
+ TEST_IMG,
['touch', os.path.join(self.mount_dest, self.filename)],
)
container = self.run_with_volume(
True,
- BUSYBOX,
+ TEST_IMG,
['ls', self.mount_dest],
)
- logs = self.client.logs(container)
+ logs = self.client.logs(container).decode('utf-8')
- if six.PY3:
- logs = logs.decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
@@ -552,13 +538,11 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
host_config = self.client.create_host_config(mounts=[mount])
container = self.run_container(
- BUSYBOX, ['ls', self.mount_dest],
+ TEST_IMG, ['ls', self.mount_dest],
host_config=host_config
)
assert container
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@@ -571,13 +555,11 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
host_config = self.client.create_host_config(mounts=[mount])
container = self.run_container(
- BUSYBOX, ['ls', self.mount_dest],
+ TEST_IMG, ['ls', self.mount_dest],
host_config=host_config
)
assert container
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
@@ -590,7 +572,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
)
host_config = self.client.create_host_config(mounts=[mount])
container = self.client.create_container(
- BUSYBOX, ['true'], host_config=host_config,
+ TEST_IMG, ['true'], host_config=host_config,
)
assert container
inspect_data = self.client.inspect_container(container)
@@ -636,7 +618,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
- BUSYBOX, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
+ TEST_IMG, f'sh -c "echo {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@@ -647,15 +629,14 @@ class ArchiveTest(BaseAPIIntegrationTest):
for d in strm:
destination.write(d)
destination.seek(0)
- retrieved_data = helpers.untar_file(destination, 'data.txt')
- if six.PY3:
- retrieved_data = retrieved_data.decode('utf-8')
+ retrieved_data = helpers.untar_file(destination, 'data.txt')\
+ .decode('utf-8')
assert data == retrieved_data.strip()
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
- BUSYBOX, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
+ TEST_IMG, f'sh -c "echo -n {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@@ -673,8 +654,8 @@ class ArchiveTest(BaseAPIIntegrationTest):
test_file.write(data)
test_file.seek(0)
ctnr = self.client.create_container(
- BUSYBOX,
- 'cat {0}'.format(
+ TEST_IMG,
+ 'cat {}'.format(
os.path.join('/vol1/', os.path.basename(test_file.name))
),
volumes=['/vol1']
@@ -685,9 +666,6 @@ class ArchiveTest(BaseAPIIntegrationTest):
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
- data = data.decode('utf-8')
assert logs.strip() == data
def test_copy_directory_to_container(self):
@@ -695,16 +673,14 @@ class ArchiveTest(BaseAPIIntegrationTest):
dirs = ['foo', 'bar']
base = helpers.make_tree(dirs, files)
ctnr = self.client.create_container(
- BUSYBOX, 'ls -p /vol1', volumes=['/vol1']
+ TEST_IMG, 'ls -p /vol1', volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
with docker.utils.tar(base) as test_tar:
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
- logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(ctnr).decode('utf-8')
results = logs.strip().split()
assert 'a.py' in results
assert 'b.py' in results
@@ -716,7 +692,7 @@ class RenameContainerTest(BaseAPIIntegrationTest):
def test_rename_container(self):
version = self.client.version()['Version']
name = 'hong_meiling'
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
@@ -725,12 +701,12 @@ class RenameContainerTest(BaseAPIIntegrationTest):
if version == '1.5.0':
assert name == inspect['Name']
else:
- assert '/{0}'.format(name) == inspect['Name']
+ assert f'/{name}' == inspect['Name']
class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
@@ -746,7 +722,7 @@ class StartContainerTest(BaseAPIIntegrationTest):
assert inspect['State']['ExitCode'] == 0
def test_start_container_with_dict_instead_of_id(self):
- res = self.client.create_container(BUSYBOX, 'true')
+ res = self.client.create_container(TEST_IMG, 'true')
assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res)
@@ -774,7 +750,7 @@ class StartContainerTest(BaseAPIIntegrationTest):
'true && echo "Night of Nights"'
]
for cmd in commands:
- container = self.client.create_container(BUSYBOX, cmd)
+ container = self.client.create_container(TEST_IMG, cmd)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -784,7 +760,7 @@ class StartContainerTest(BaseAPIIntegrationTest):
class WaitTest(BaseAPIIntegrationTest):
def test_wait(self):
- res = self.client.create_container(BUSYBOX, ['sleep', '3'])
+ res = self.client.create_container(TEST_IMG, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
@@ -797,7 +773,7 @@ class WaitTest(BaseAPIIntegrationTest):
assert inspect['State']['ExitCode'] == exitcode
def test_wait_with_dict_instead_of_id(self):
- res = self.client.create_container(BUSYBOX, ['sleep', '3'])
+ res = self.client.create_container(TEST_IMG, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
@@ -811,13 +787,13 @@ class WaitTest(BaseAPIIntegrationTest):
@requires_api_version('1.30')
def test_wait_with_condition(self):
- ctnr = self.client.create_container(BUSYBOX, 'true')
+ ctnr = self.client.create_container(TEST_IMG, 'true')
self.tmp_containers.append(ctnr)
with pytest.raises(requests.exceptions.ConnectionError):
self.client.wait(ctnr, condition='removed', timeout=1)
ctnr = self.client.create_container(
- BUSYBOX, ['sleep', '3'],
+ TEST_IMG, ['sleep', '3'],
host_config=self.client.create_host_config(auto_remove=True)
)
self.tmp_containers.append(ctnr)
@@ -831,7 +807,7 @@ class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo {0}'.format(snippet)
+ TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -845,7 +821,7 @@ class LogsTest(BaseAPIIntegrationTest):
snippet = '''Line1
Line2'''
container = self.client.create_container(
- BUSYBOX, 'echo "{0}"'.format(snippet)
+ TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -858,12 +834,12 @@ Line2'''
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo {0}'.format(snippet)
+ TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- logs = six.binary_type()
+ logs = b''
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
@@ -873,15 +849,17 @@ Line2'''
assert logs == (snippet + '\n').encode(encoding='ascii')
@pytest.mark.timeout(5)
+ @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+ reason='No cancellable streams over SSH')
def test_logs_streaming_and_follow_and_cancel(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
+ TEST_IMG, f'sh -c "echo \\"{snippet}\\" && sleep 3"'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- logs = six.binary_type()
+ logs = b''
generator = self.client.logs(id, stream=True, follow=True)
threading.Timer(1, generator.close).start()
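
Cancellation here is just closing the response generator, which tears down the underlying connection; that is also why the new skipif applies to SSH transports, where the stream apparently cannot be interrupted this way. The pattern in isolation, assuming a running container id in ctnr:

    import threading

    stream = client.logs(ctnr, stream=True, follow=True)
    # Closing the generator closes the connection, so the join below
    # terminates instead of following the log forever.
    threading.Timer(1, stream.close).start()
    logs = b''.join(stream)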
@@ -894,7 +872,7 @@ Line2'''
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo {0}'.format(snippet)
+ TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -907,7 +885,7 @@ Line2'''
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- BUSYBOX, 'echo "{0}"'.format(snippet)
+ TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -921,7 +899,7 @@ Line2'''
def test_logs_with_until(self):
snippet = 'Shanghai Teahouse (Hong Meiling)'
container = self.client.create_container(
- BUSYBOX, 'echo "{0}"'.format(snippet)
+ TEST_IMG, f'echo "{snippet}"'
)
self.tmp_containers.append(container)
@@ -936,7 +914,7 @@ Line2'''
class DiffTest(BaseAPIIntegrationTest):
def test_diff(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -949,7 +927,7 @@ class DiffTest(BaseAPIIntegrationTest):
assert test_diff[0]['Kind'] == 1
def test_diff_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -964,7 +942,7 @@ class DiffTest(BaseAPIIntegrationTest):
class StopTest(BaseAPIIntegrationTest):
def test_stop(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -976,7 +954,7 @@ class StopTest(BaseAPIIntegrationTest):
assert state['Running'] is False
def test_stop_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
assert 'Id' in container
id = container['Id']
self.client.start(container)
@@ -991,7 +969,7 @@ class StopTest(BaseAPIIntegrationTest):
class KillTest(BaseAPIIntegrationTest):
def test_kill(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -1005,7 +983,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False
def test_kill_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -1019,7 +997,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False
def test_kill_with_signal(self):
- id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.tmp_containers.append(id)
self.client.start(id)
self.client.kill(
@@ -1036,7 +1014,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False, state
def test_kill_with_signal_name(self):
- id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal='SIGKILL')
@@ -1051,7 +1029,7 @@ class KillTest(BaseAPIIntegrationTest):
assert state['Running'] is False, state
def test_kill_with_signal_integer(self):
- id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=9)
@@ -1068,14 +1046,19 @@ class KillTest(BaseAPIIntegrationTest):
class PortTest(BaseAPIIntegrationTest):
def test_port(self):
-
port_bindings = {
'1111': ('127.0.0.1', '4567'),
- '2222': ('127.0.0.1', '4568')
+ '2222': ('127.0.0.1', '4568'),
+ '3333/udp': ('127.0.0.1', '4569'),
}
+ ports = [
+ 1111,
+ 2222,
+ (3333, 'udp'),
+ ]
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
+ TEST_IMG, ['sleep', '60'], ports=ports,
host_config=self.client.create_host_config(
port_bindings=port_bindings, network_mode='bridge'
)
@@ -1086,21 +1069,25 @@ class PortTest(BaseAPIIntegrationTest):
# Call the port function on each binding and compare expected vs. actual
for port in port_bindings:
+ port, _, protocol = port.partition('/')
actual_bindings = self.client.port(container, port)
port_binding = actual_bindings.pop()
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
- assert ip == port_bindings[port][0]
- assert host_port == port_bindings[port][1]
+ port_binding = port if not protocol else port + "/" + protocol
+ assert ip == port_bindings[port_binding][0]
+ assert host_port == port_bindings[port_binding][1]
self.client.kill(id)
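
The partition('/') above mirrors how port() resolves lookups: a bare private port assumes TCP, while other protocols need the 'port/proto' form that the bindings dict uses as keys. Roughly, assuming a local daemon and busybox:

    import docker

    client = docker.APIClient()
    bindings = {'1111': ('127.0.0.1', '4567'), '3333/udp': ('127.0.0.1', '4569')}
    ctnr = client.create_container(
        'busybox', ['sleep', '60'], ports=[1111, (3333, 'udp')],
        host_config=client.create_host_config(
            port_bindings=bindings, network_mode='bridge'
        )
    )
    client.start(ctnr)
    # TCP is assumed for a bare port; UDP needs the explicit suffix.
    assert client.port(ctnr, 1111)[0]['HostPort'] == '4567'
    assert client.port(ctnr, '3333/udp')[0]['HostPort'] == '4569'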
class ContainerTopTest(BaseAPIIntegrationTest):
+ @pytest.mark.xfail(reason='Output of docker top depends on host distro, '
+ 'and is not formalized.')
def test_top(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60']
+ TEST_IMG, ['sleep', '60']
)
self.tmp_containers.append(container)
@@ -1108,9 +1095,7 @@ class ContainerTopTest(BaseAPIIntegrationTest):
self.client.start(container)
res = self.client.top(container)
if not IS_WINDOWS_PLATFORM:
- assert res['Titles'] == [
- 'UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD'
- ]
+ assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND']
assert len(res['Processes']) == 1
assert res['Processes'][0][-1] == 'sleep 60'
self.client.kill(container)
@@ -1118,25 +1103,24 @@ class ContainerTopTest(BaseAPIIntegrationTest):
@pytest.mark.skipif(
IS_WINDOWS_PLATFORM, reason='No psargs support on windows'
)
+ @pytest.mark.xfail(reason='Output of docker top depends on host distro, '
+ 'and is not formalized.')
def test_top_with_psargs(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'])
+ TEST_IMG, ['sleep', '60'])
self.tmp_containers.append(container)
self.client.start(container)
- res = self.client.top(container, 'waux')
- assert res['Titles'] == [
- 'USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
- 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'
- ]
+ res = self.client.top(container, '-eopid,user')
+ assert res['Titles'] == ['PID', 'USER']
assert len(res['Processes']) == 1
assert res['Processes'][0][1] == 'root'  # only PID and USER columns remain
class RestartContainerTest(BaseAPIIntegrationTest):
def test_restart(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -1155,16 +1139,16 @@ class RestartContainerTest(BaseAPIIntegrationTest):
self.client.kill(id)
def test_restart_with_low_timeout(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.client.start(container)
- self.client.timeout = 1
- self.client.restart(container, timeout=3)
+ self.client.timeout = 3
+ self.client.restart(container, timeout=1)
self.client.timeout = None
- self.client.restart(container, timeout=3)
+ self.client.restart(container, timeout=1)
self.client.kill(container)
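
The swapped values are the actual fix: the client-side HTTP timeout has to outlast the daemon-side grace period given to restart, or the request aborts before the daemon can answer. Schematically, with container standing in for a running container id:

    client = docker.APIClient(timeout=3)  # HTTP read timeout, in seconds
    # The daemon gets 1 second to stop the container gracefully; keep
    # the HTTP timeout comfortably larger (or None for no limit).
    client.restart(container, timeout=1)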
def test_restart_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
assert 'Id' in container
id = container['Id']
self.client.start(container)
@@ -1186,7 +1170,7 @@ class RestartContainerTest(BaseAPIIntegrationTest):
class RemoveContainerTest(BaseAPIIntegrationTest):
def test_remove(self):
- container = self.client.create_container(BUSYBOX, ['true'])
+ container = self.client.create_container(TEST_IMG, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
@@ -1196,7 +1180,7 @@ class RemoveContainerTest(BaseAPIIntegrationTest):
assert len(res) == 0
def test_remove_with_dict_instead_of_id(self):
- container = self.client.create_container(BUSYBOX, ['true'])
+ container = self.client.create_container(TEST_IMG, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
@@ -1208,7 +1192,7 @@ class RemoveContainerTest(BaseAPIIntegrationTest):
class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
- container = self.client.create_container(BUSYBOX, '/bin/sh',
+ container = self.client.create_container(TEST_IMG, '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
@@ -1219,8 +1203,8 @@ class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_reading_socket(self):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
- command = "printf '{0}'".format(line)
- container = self.client.create_container(BUSYBOX, command,
+ command = f"printf '{line}'"
+ container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
@@ -1230,31 +1214,37 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
- next_size = next_frame_size(pty_stdout)
+ (stream, next_size) = next_frame_header(pty_stdout)
+ assert stream == 1 # corresponds to stdout
assert next_size == len(line)
data = read_exactly(pty_stdout, next_size)
assert data.decode('utf-8') == line
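
next_frame_header() supersedes next_frame_size() because the 8-byte multiplexing header carries both facts at once: byte 0 is the stream id (0=stdin, 1=stdout, 2=stderr), bytes 1-3 are padding, and bytes 4-7 are the payload length as a big-endian u32. A standalone parser for illustration:

    import struct

    def parse_frame_header(header: bytes):
        # '>BxxxI': stream-id byte, 3 pad bytes, u32 big-endian length
        stream_id, length = struct.unpack('>BxxxI', header)
        return stream_id, length

    assert parse_frame_header(b'\x01\x00\x00\x00\x00\x00\x00\x05') == (1, 5)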
def test_attach_no_stream(self):
container = self.client.create_container(
- BUSYBOX, 'echo hello'
+ TEST_IMG, 'echo hello'
)
self.tmp_containers.append(container)
self.client.start(container)
output = self.client.attach(container, stream=False, logs=True)
assert output == 'hello\n'.encode(encoding='ascii')
- @pytest.mark.timeout(5)
+ @pytest.mark.timeout(10)
+ @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+ reason='No cancellable streams over SSH')
+ @pytest.mark.xfail(condition=os.environ.get('DOCKER_TLS_VERIFY') or
+ os.environ.get('DOCKER_CERT_PATH'),
+ reason='Flaky test on TLS')
def test_attach_stream_and_cancel(self):
container = self.client.create_container(
- BUSYBOX, 'sh -c "echo hello && sleep 60"',
+ TEST_IMG, 'sh -c "sleep 2 && echo hello && sleep 60"',
tty=True
)
self.tmp_containers.append(container)
self.client.start(container)
output = self.client.attach(container, stream=True, logs=True)
- threading.Timer(1, output.close).start()
+ threading.Timer(3, output.close).start()
lines = []
for line in output:
@@ -1265,7 +1255,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
def test_detach_with_default(self):
container = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
@@ -1284,7 +1274,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
@@ -1301,7 +1291,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
- BUSYBOX, 'cat',
+ TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
@@ -1317,7 +1307,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
id = container['Id']
self.tmp_containers.append(id)
self.client.start(container)
@@ -1348,9 +1338,9 @@ class PruneTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_prune_containers(self):
container1 = self.client.create_container(
- BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
+ TEST_IMG, ['sh', '-c', 'echo hello > /data.txt']
)
- container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ container2 = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.client.start(container1)
self.client.start(container2)
self.client.wait(container1)
@@ -1363,7 +1353,7 @@ class PruneTest(BaseAPIIntegrationTest):
class GetContainerStatsTest(BaseAPIIntegrationTest):
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'],
+ TEST_IMG, ['sleep', '60'],
)
self.tmp_containers.append(container)
self.client.start(container)
@@ -1377,7 +1367,7 @@ class GetContainerStatsTest(BaseAPIIntegrationTest):
def test_get_container_stats_stream(self):
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'],
+ TEST_IMG, ['sleep', '60'],
)
self.tmp_containers.append(container)
self.client.start(container)
@@ -1395,7 +1385,7 @@ class ContainerUpdateTest(BaseAPIIntegrationTest):
old_mem_limit = 400 * 1024 * 1024
new_mem_limit = 300 * 1024 * 1024
container = self.client.create_container(
- BUSYBOX, 'top', host_config=self.client.create_host_config(
+ TEST_IMG, 'top', host_config=self.client.create_host_config(
mem_limit=old_mem_limit
)
)
@@ -1416,7 +1406,7 @@ class ContainerUpdateTest(BaseAPIIntegrationTest):
'Name': 'on-failure'
}
container = self.client.create_container(
- BUSYBOX, ['sleep', '60'],
+ TEST_IMG, ['sleep', '60'],
host_config=self.client.create_host_config(
restart_policy=old_restart_policy
)
@@ -1440,7 +1430,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
def test_container_cpu_shares(self):
cpu_shares = 512
container = self.client.create_container(
- BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ TEST_IMG, 'ls', host_config=self.client.create_host_config(
cpu_shares=cpu_shares
)
)
@@ -1452,7 +1442,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
def test_container_cpuset(self):
cpuset_cpus = "0,1"
container = self.client.create_container(
- BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ TEST_IMG, 'ls', host_config=self.client.create_host_config(
cpuset_cpus=cpuset_cpus
)
)
@@ -1464,7 +1454,7 @@ class ContainerCPUTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_with_runtime(self):
container = self.client.create_container(
- BUSYBOX, ['echo', 'test'], runtime='runc'
+ TEST_IMG, ['echo', 'test'], runtime='runc'
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
@@ -1475,7 +1465,7 @@ class LinkTest(BaseAPIIntegrationTest):
def test_remove_link(self):
# Create containers
container1 = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
@@ -1487,7 +1477,7 @@ class LinkTest(BaseAPIIntegrationTest):
link_alias = 'mylink'
container2 = self.client.create_container(
- BUSYBOX, 'cat', host_config=self.client.create_host_config(
+ TEST_IMG, 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}
)
)
@@ -1497,7 +1487,7 @@ class LinkTest(BaseAPIIntegrationTest):
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
- link_name = '%s/%s' % (linked_name, link_alias)
+ link_name = f'{linked_name}/{link_alias}'
self.client.remove_container(link_name, link=True)
# Link is gone
diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py
index 1a5a4e5..4d7748f 100644
--- a/tests/integration/api_exec_test.py
+++ b/tests/integration/api_exec_test.py
@@ -1,15 +1,54 @@
-from docker.utils.socket import next_frame_size
+from ..helpers import assert_cat_socket_detached_with_keys
+from ..helpers import ctrl_with
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
+from .base import TEST_IMG
+from docker.utils.proxy import ProxyConfig
+from docker.utils.socket import next_frame_header
from docker.utils.socket import read_exactly
-from .base import BaseAPIIntegrationTest, BUSYBOX
-from ..helpers import (
- requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
-)
-
class ExecTest(BaseAPIIntegrationTest):
+ def test_execute_command_with_proxy_env(self):
+ # Set a custom proxy config on the client
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', https='b', http='c', no_proxy='d'
+ )
+
+ container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True,
+ )
+ self.client.start(container)
+ self.tmp_containers.append(container)
+
+ cmd = 'sh -c "env | grep -i proxy"'
+
+ # First, just make sure the environment variables from the custom
+ # config are set
+
+ res = self.client.exec_create(container, cmd=cmd)
+ output = self.client.exec_start(res).decode('utf-8').split('\n')
+ expected = [
+ 'ftp_proxy=a', 'https_proxy=b', 'http_proxy=c', 'no_proxy=d',
+ 'FTP_PROXY=a', 'HTTPS_PROXY=b', 'HTTP_PROXY=c', 'NO_PROXY=d'
+ ]
+ for item in expected:
+ assert item in output
+
+ # Overwrite some variables with a custom environment
+ env = {'https_proxy': 'xxx', 'HTTPS_PROXY': 'XXX'}
+
+ res = self.client.exec_create(container, cmd=cmd, environment=env)
+ output = self.client.exec_start(res).decode('utf-8').split('\n')
+ expected = [
+ 'ftp_proxy=a', 'https_proxy=xxx', 'http_proxy=c', 'no_proxy=d',
+ 'FTP_PROXY=a', 'HTTPS_PROXY=XXX', 'HTTP_PROXY=c', 'NO_PROXY=d'
+ ]
+ for item in expected:
+ assert item in output
+
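+
The proxy variables come from the client's ProxyConfig and are injected into every exec/create environment, with explicitly passed variables taking precedence. The merge can be sketched directly:

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig(http='c', https='b', ftp='a', no_proxy='d')
    # Proxy variables are prepended, so entries the caller supplies
    # later in the list win when the daemon resolves duplicates.
    env = cfg.inject_proxy_environment(['https_proxy=xxx'])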
def test_execute_command(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -22,7 +61,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert exec_log == b'hello\n'
def test_exec_command_string(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -35,20 +74,20 @@ class ExecTest(BaseAPIIntegrationTest):
assert exec_log == b'hello world\n'
def test_exec_command_as_user(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- res = self.client.exec_create(id, 'whoami', user='default')
+ res = self.client.exec_create(id, 'whoami', user='postgres')
assert 'Id' in res
exec_log = self.client.exec_start(res)
- assert exec_log == b'default\n'
+ assert exec_log == b'postgres\n'
def test_exec_command_as_root(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -61,7 +100,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert exec_log == b'root\n'
def test_exec_command_streaming(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
@@ -76,7 +115,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert res == b'hello\nworld\n'
def test_exec_start_socket(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
@@ -91,13 +130,14 @@ class ExecTest(BaseAPIIntegrationTest):
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
- next_size = next_frame_size(socket)
+ (stream, next_size) = next_frame_header(socket)
+ assert stream == 1 # stdout (0 = stdin, 1 = stdout, 2 = stderr)
assert next_size == len(line)
data = read_exactly(socket, next_size)
assert data.decode('utf-8') == line
def test_exec_start_detached(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
@@ -112,7 +152,7 @@ class ExecTest(BaseAPIIntegrationTest):
assert response == ""
def test_exec_inspect(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -127,7 +167,7 @@ class ExecTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_exec_command_with_env(self):
- container = self.client.create_container(BUSYBOX, 'cat',
+ container = self.client.create_container(TEST_IMG, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
@@ -142,18 +182,18 @@ class ExecTest(BaseAPIIntegrationTest):
@requires_api_version('1.35')
def test_exec_command_with_workdir(self):
container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
self.tmp_containers.append(container)
self.client.start(container)
- res = self.client.exec_create(container, 'pwd', workdir='/var/www')
+ res = self.client.exec_create(container, 'pwd', workdir='/var/opt')
exec_log = self.client.exec_start(res)
- assert exec_log == b'/var/www\n'
+ assert exec_log == b'/var/opt\n'
def test_detach_with_default(self):
container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
@@ -172,7 +212,7 @@ class ExecTest(BaseAPIIntegrationTest):
def test_detach_with_config_file(self):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
@@ -186,20 +226,87 @@ class ExecTest(BaseAPIIntegrationTest):
assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
- def test_detach_with_arg(self):
- self.client._general_configs['detachKeys'] = 'ctrl-p'
- container = self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True
- )
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
- exec_id = self.client.exec_create(
- id, 'cat',
- stdin=True, tty=True, detach_keys='ctrl-x', stdout=True
+class ExecDemuxTest(BaseAPIIntegrationTest):
+ cmd = 'sh -c "{}"'.format(' ; '.join([
+ # Write something on stdout
+ 'echo hello out',
+ # Busybox's sleep supports fractional seconds; pause briefly so
+ # the stdout and stderr writes land in separate stream frames.
+ 'sleep 0.5',
+ # Write something on stderr
+ 'echo hello err >&2'])
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True
)
- sock = self.client.exec_start(exec_id, tty=True, socket=True)
- self.addCleanup(sock.close)
+ self.client.start(self.container)
+ self.tmp_containers.append(self.container)
- assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
+ def test_exec_command_no_stream_no_demux(self):
+ # tty=False, stream=False, demux=False
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = self.client.exec_start(res)
+ assert b'hello out\n' in exec_log
+ assert b'hello err\n' in exec_log
+
+ def test_exec_command_stream_no_demux(self):
+ # tty=False, stream=True, demux=False
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = list(self.client.exec_start(res, stream=True))
+ assert len(exec_log) == 2
+ assert b'hello out\n' in exec_log
+ assert b'hello err\n' in exec_log
+
+ def test_exec_command_no_stream_demux(self):
+ # tty=False, stream=False, demux=True
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = self.client.exec_start(res, demux=True)
+ assert exec_log == (b'hello out\n', b'hello err\n')
+
+ def test_exec_command_stream_demux(self):
+ # tty=False, stream=True, demux=True
+ res = self.client.exec_create(self.container, self.cmd)
+ exec_log = list(self.client.exec_start(res, demux=True, stream=True))
+ assert len(exec_log) == 2
+ assert (b'hello out\n', None) in exec_log
+ assert (None, b'hello err\n') in exec_log
+
+ def test_exec_command_tty_no_stream_no_demux(self):
+ # tty=True, stream=False, demux=False
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'hello out\r\nhello err\r\n'
+
+ def test_exec_command_tty_stream_no_demux(self):
+ # tty=True, stream=True, demux=False
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = list(self.client.exec_start(res, stream=True))
+ assert b'hello out\r\n' in exec_log
+ if len(exec_log) == 2:
+ assert b'hello err\r\n' in exec_log
+ else:
+ assert len(exec_log) == 3
+ assert b'hello err' in exec_log
+ assert b'\r\n' in exec_log
+
+ def test_exec_command_tty_no_stream_demux(self):
+ # tty=True, stream=False, demux=True
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = self.client.exec_start(res, demux=True)
+ assert exec_log == (b'hello out\r\nhello err\r\n', None)
+
+ def test_exec_command_tty_stream_demux(self):
+ # tty=True, stream=True, demux=True
+ res = self.client.exec_create(self.container, self.cmd, tty=True)
+ exec_log = list(self.client.exec_start(res, demux=True, stream=True))
+ assert (b'hello out\r\n', None) in exec_log
+ if len(exec_log) == 2:
+ assert (b'hello err\r\n', None) in exec_log
+ else:
+ assert len(exec_log) == 3
+ assert (b'hello err', None) in exec_log
+ assert (b'\r\n', None) in exec_log
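
Taken together, the matrix pins down the demux contract: with tty=False the daemon multiplexes stdout and stderr, and demux=True splits them into (stdout, stderr) tuples; with tty=True everything arrives as a single stdout stream, so the stderr slot is always None. Minimal usage, assuming a running container id in ctnr:

    exec_id = client.exec_create(ctnr, 'sh -c "echo out; echo err >&2"')
    stdout, stderr = client.exec_start(exec_id, demux=True)
    # With tty=False: stdout == b'out\n' and stderr == b'err\n'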
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
index 5dbac37..c54583b 100644
--- a/tests/integration/api_healthcheck_test.py
+++ b/tests/integration/api_healthcheck_test.py
@@ -1,4 +1,4 @@
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
from .. import helpers
SECOND = 1000000000
@@ -16,7 +16,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_shell_command(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
+ TEST_IMG, 'top', healthcheck=dict(test='echo "hello world"'))
self.tmp_containers.append(container)
res = self.client.inspect_container(container)
@@ -27,7 +27,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_passes(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(
+ TEST_IMG, 'top', healthcheck=dict(
test="true",
interval=1 * SECOND,
timeout=1 * SECOND,
@@ -40,7 +40,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_fails(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(
+ TEST_IMG, 'top', healthcheck=dict(
test="false",
interval=1 * SECOND,
timeout=1 * SECOND,
@@ -53,7 +53,7 @@ class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.29')
def test_healthcheck_start_period(self):
container = self.client.create_container(
- BUSYBOX, 'top', healthcheck=dict(
+ TEST_IMG, 'top', healthcheck=dict(
test="echo 'x' >> /counter.txt && "
"test `cat /counter.txt | wc -l` -ge 3",
interval=1 * SECOND,
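
All durations in these healthcheck dicts are nanoseconds, hence the SECOND constant defined at the top of the file. A standalone sketch:

    import docker

    SECOND = 1_000_000_000  # healthcheck durations are in nanoseconds
    client = docker.APIClient()
    ctnr = client.create_container(
        'busybox', 'top',
        healthcheck={
            'test': 'true',          # shell form; a list uses CMD/CMD-SHELL
            'interval': 1 * SECOND,
            'timeout': 1 * SECOND,
            'retries': 1,
        },
    )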
diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py
index 050e7f3..e30de46 100644
--- a/tests/integration/api_image_test.py
+++ b/tests/integration/api_image_test.py
@@ -7,15 +7,14 @@ import tempfile
import threading
import pytest
-import six
-from six.moves import BaseHTTPServer
-from six.moves import socketserver
+from http.server import SimpleHTTPRequestHandler
+import socketserver
import docker
from ..helpers import requires_api_version, requires_experimental
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
class ListImagesTest(BaseAPIIntegrationTest):
@@ -33,7 +32,7 @@ class ListImagesTest(BaseAPIIntegrationTest):
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
- assert type(res1[0]) == six.text_type
+ assert type(res1[0]) == str
class PullImageTest(BaseAPIIntegrationTest):
@@ -42,9 +41,9 @@ class PullImageTest(BaseAPIIntegrationTest):
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
- res = self.client.pull('hello-world', tag='latest')
+ res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
- assert type(res) == six.text_type
+ assert type(res) == str
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
@@ -55,7 +54,7 @@ class PullImageTest(BaseAPIIntegrationTest):
except docker.errors.APIError:
pass
stream = self.client.pull(
- 'hello-world', tag='latest', stream=True, decode=True)
+ 'hello-world', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
@@ -69,13 +68,15 @@ class PullImageTest(BaseAPIIntegrationTest):
with pytest.raises(docker.errors.APIError) as excinfo:
self.client.pull('hello-world', platform='foobar')
- assert excinfo.value.status_code == 500
- assert 'invalid platform' in excinfo.exconly()
+ # Some API versions incorrectly return a 500 status; assert any 4xx/5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -88,13 +89,13 @@ class CommitTest(BaseAPIIntegrationTest):
assert img['Container'].startswith(id)
assert 'ContainerConfig' in img
assert 'Image' in img['ContainerConfig']
- assert BUSYBOX == img['ContainerConfig']['Image']
- busybox_id = self.client.inspect_image(BUSYBOX)['Id']
+ assert TEST_IMG == img['ContainerConfig']['Image']
+ busybox_id = self.client.inspect_image(TEST_IMG)['Id']
assert 'Parent' in img
assert img['Parent'] == busybox_id
def test_commit_with_changes(self):
- cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ cid = self.client.create_container(TEST_IMG, ['touch', '/test'])
self.tmp_containers.append(cid)
self.client.start(cid)
img_id = self.client.commit(
@@ -110,7 +111,7 @@ class CommitTest(BaseAPIIntegrationTest):
class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
- container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
@@ -264,14 +265,14 @@ class ImportImageTest(BaseAPIIntegrationTest):
output = self.client.load_image(data)
assert any([
line for line in output
- if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
+ if f'Loaded image: {test_img}' in line.get('stream', '')
])
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
- class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
+ class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
@@ -283,7 +284,7 @@ class ImportImageTest(BaseAPIIntegrationTest):
thread.setDaemon(True)
thread.start()
- yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
+ yield f'http://{socket.gethostname()}:{server.server_address[1]}'
server.shutdown()
@@ -317,7 +318,7 @@ class PruneImagesTest(BaseAPIIntegrationTest):
pass
# Ensure busybox does not get pruned
- ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ ctnr = self.client.create_container(TEST_IMG, ['sleep', '9999'])
self.tmp_containers.append(ctnr)
self.client.pull('hello-world', tag='latest')
@@ -341,7 +342,7 @@ class SaveLoadImagesTest(BaseAPIIntegrationTest):
@requires_api_version('1.23')
def test_get_image_load_image(self):
with tempfile.TemporaryFile() as f:
- stream = self.client.get_image(BUSYBOX)
+ stream = self.client.get_image(TEST_IMG)
for chunk in stream:
f.write(chunk)
@@ -349,7 +350,7 @@ class SaveLoadImagesTest(BaseAPIIntegrationTest):
result = self.client.load_image(f.read())
success = False
- result_line = 'Loaded image: {}\n'.format(BUSYBOX)
+ result_line = f'Loaded image: {TEST_IMG}\n'
for data in result:
print(data)
if 'stream' in data:
diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py
index b6726d0..2568138 100644
--- a/tests/integration/api_network_test.py
+++ b/tests/integration/api_network_test.py
@@ -3,13 +3,13 @@ from docker.types import IPAMConfig, IPAMPool
import pytest
from ..helpers import random_name, requires_api_version
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
class TestNetworks(BaseAPIIntegrationTest):
def tearDown(self):
- super(TestNetworks, self).tearDown()
self.client.leave_swarm(force=True)
+ super().tearDown()
def create_network(self, *args, **kwargs):
net_name = random_name()
@@ -92,7 +92,7 @@ class TestNetworks(BaseAPIIntegrationTest):
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -119,7 +119,7 @@ class TestNetworks(BaseAPIIntegrationTest):
def test_connect_and_force_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -144,7 +144,7 @@ class TestNetworks(BaseAPIIntegrationTest):
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
- container = self.client.create_container(BUSYBOX, 'top')
+ container = self.client.create_container(TEST_IMG, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -161,7 +161,7 @@ class TestNetworks(BaseAPIIntegrationTest):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image=BUSYBOX,
+ image=TEST_IMG,
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
@@ -181,7 +181,7 @@ class TestNetworks(BaseAPIIntegrationTest):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image=BUSYBOX,
+ image=TEST_IMG,
command='top',
host_config=self.client.create_host_config(
network_mode=net_name,
@@ -211,7 +211,7 @@ class TestNetworks(BaseAPIIntegrationTest):
),
)
container = self.client.create_container(
- image=BUSYBOX, command='top',
+ image=TEST_IMG, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -237,7 +237,7 @@ class TestNetworks(BaseAPIIntegrationTest):
),
)
container = self.client.create_container(
- image=BUSYBOX, command='top',
+ image=TEST_IMG, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -257,7 +257,7 @@ class TestNetworks(BaseAPIIntegrationTest):
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
container = self.client.create_container(
- BUSYBOX, 'top',
+ TEST_IMG, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
@@ -275,6 +275,27 @@ class TestNetworks(BaseAPIIntegrationTest):
assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
+ @requires_api_version('1.32')
+ def test_create_with_driveropt(self):
+ container = self.client.create_container(
+ TEST_IMG, 'top',
+ networking_config=self.client.create_networking_config(
+ {
+ 'bridge': self.client.create_endpoint_config(
+ driver_opt={'com.docker-py.setting': 'on'}
+ )
+ }
+ ),
+ host_config=self.client.create_host_config(network_mode='bridge')
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ container_data = self.client.inspect_container(container)
+ net_cfg = container_data['NetworkSettings']['Networks']['bridge']
+ assert 'DriverOpts' in net_cfg
+ assert 'com.docker-py.setting' in net_cfg['DriverOpts']
+ assert net_cfg['DriverOpts']['com.docker-py.setting'] == 'on'
+
@requires_api_version('1.22')
def test_create_with_links(self):
net_name, net_id = self.create_network()
diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py
index 1150b09..38f9d12 100644
--- a/tests/integration/api_plugin_test.py
+++ b/tests/integration/api_plugin_test.py
@@ -3,7 +3,7 @@ import os
import docker
import pytest
-from .base import BaseAPIIntegrationTest, TEST_API_VERSION
+from .base import BaseAPIIntegrationTest
from ..helpers import requires_api_version
SSHFS = 'vieux/sshfs:latest'
@@ -13,27 +13,27 @@ SSHFS = 'vieux/sshfs:latest'
class PluginTest(BaseAPIIntegrationTest):
@classmethod
def teardown_class(cls):
- c = docker.APIClient(
- version=TEST_API_VERSION, timeout=60,
- **docker.utils.kwargs_from_env()
- )
+ client = cls.get_client_instance()
try:
- c.remove_plugin(SSHFS, force=True)
+ client.remove_plugin(SSHFS, force=True)
except docker.errors.APIError:
pass
def teardown_method(self, method):
+ client = self.get_client_instance()
try:
- self.client.disable_plugin(SSHFS)
+ client.disable_plugin(SSHFS)
except docker.errors.APIError:
pass
for p in self.tmp_plugins:
try:
- self.client.remove_plugin(p, force=True)
+ client.remove_plugin(p, force=True)
except docker.errors.APIError:
pass
+ client.close()
+
def ensure_plugin_installed(self, plugin_name):
try:
return self.client.inspect_plugin(plugin_name)
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
index b3d93b8..fd98543 100644
--- a/tests/integration/api_secret_test.py
+++ b/tests/integration/api_secret_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import docker
import pytest
@@ -31,7 +29,7 @@ class SecretAPITest(BaseAPIIntegrationTest):
def test_create_secret_unicode_data(self):
secret_id = self.client.create_secret(
- 'favorite_character', u'いざよいさくや'
+ 'favorite_character', 'いざよいさくや'
)
self.tmp_secrets.append(secret_id)
assert 'ID' in secret_id
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
index 85f9dcc..dcf195d 100644
--- a/tests/integration/api_service_test.py
+++ b/tests/integration/api_service_test.py
@@ -1,16 +1,13 @@
-# -*- coding: utf-8 -*-
-
import random
import time
import docker
import pytest
-import six
from ..helpers import (
force_leave_swarm, requires_api_version, requires_experimental
)
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
class ServiceTest(BaseAPIIntegrationTest):
@@ -31,10 +28,10 @@ class ServiceTest(BaseAPIIntegrationTest):
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
- super(ServiceTest, self).tearDown()
+ super().tearDown()
def get_service_name(self):
- return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+ return f'dockerpytest_{random.getrandbits(64):x}'
def get_service_container(self, service_name, attempts=20, interval=0.5,
include_stopped=False):
@@ -55,12 +52,12 @@ class ServiceTest(BaseAPIIntegrationTest):
def create_simple_service(self, name=None, labels=None):
if name:
- name = 'dockerpytest_{0}'.format(name)
+ name = f'dockerpytest_{name}'
else:
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
return name, self.client.create_service(
@@ -150,13 +147,13 @@ class ServiceTest(BaseAPIIntegrationTest):
else:
break
- if six.PY3:
+ if log_line is not None:
log_line = log_line.decode('utf-8')
assert 'hello\n' in log_line
def test_create_service_custom_log_driver(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
log_cfg = docker.types.DriverConfig('none')
task_tmpl = docker.types.TaskTemplate(
@@ -174,7 +171,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_volume_mount(self):
vol_name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['ls'],
+ TEST_IMG, ['ls'],
mounts=[
docker.types.Mount(target='/test', source=vol_name)
]
@@ -194,7 +191,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert mount['Type'] == 'volume'
def test_create_service_with_resources_constraints(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
resources = docker.types.Resources(
cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
@@ -214,7 +211,7 @@ class ServiceTest(BaseAPIIntegrationTest):
]
def _create_service_with_generic_resources(self, generic_resources):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
resources = docker.types.Resources(
generic_resources=generic_resources
@@ -265,7 +262,7 @@ class ServiceTest(BaseAPIIntegrationTest):
self._create_service_with_generic_resources(test_input)
def test_create_service_with_update_config(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
@@ -281,6 +278,20 @@ class ServiceTest(BaseAPIIntegrationTest):
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
+ @requires_api_version('1.28')
+ def test_create_service_with_failure_action_rollback(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(failure_action='rollback')
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
@requires_api_version('1.25')
def test_create_service_with_update_config_monitor(self):
container_spec = docker.types.ContainerSpec('busybox', ['true'])
@@ -298,8 +309,29 @@ class ServiceTest(BaseAPIIntegrationTest):
assert update_config['Monitor'] == uc['Monitor']
assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
+ @requires_api_version('1.28')
+ def test_create_service_with_rollback_config(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ rollback_cfg = docker.types.RollbackConfig(
+ parallelism=10, delay=5, failure_action='pause',
+ monitor=300000000, max_failure_ratio=0.4
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, rollback_config=rollback_cfg, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'RollbackConfig' in svc_info['Spec']
+ rc = svc_info['Spec']['RollbackConfig']
+ assert rollback_cfg['Parallelism'] == rc['Parallelism']
+ assert rollback_cfg['Delay'] == rc['Delay']
+ assert rollback_cfg['FailureAction'] == rc['FailureAction']
+ assert rollback_cfg['Monitor'] == rc['Monitor']
+ assert rollback_cfg['MaxFailureRatio'] == rc['MaxFailureRatio']
+
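+
RollbackConfig accepts the same fields as UpdateConfig (in docker-py it is implemented as a subclass) but controls how a service is rolled back rather than rolled out:

    from docker.types import RollbackConfig, UpdateConfig

    # Same knobs, applied to the rollback phase of a deployment.
    assert issubclass(RollbackConfig, UpdateConfig)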
def test_create_service_with_restart_policy(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
policy = docker.types.RestartPolicy(
docker.types.RestartPolicy.condition_types.ANY,
delay=5, max_attempts=5
@@ -322,7 +354,7 @@ class ServiceTest(BaseAPIIntegrationTest):
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
@@ -336,24 +368,53 @@ class ServiceTest(BaseAPIIntegrationTest):
{'Target': net1['Id']}, {'Target': net2['Id']}
]
+ def test_create_service_with_network_attachment_config(self):
+ network = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(network['Id'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ network_config = docker.types.NetworkAttachmentConfig(
+ target='dockerpytest_1',
+ aliases=['dockerpytest_1_alias'],
+ options={
+ 'foo': 'bar'
+ }
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ networks=[network_config]
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ service_networks_info = svc_info['Spec']['TaskTemplate']['Networks']
+ assert len(service_networks_info) == 1
+ assert service_networks_info[0]['Target'] == network['Id']
+ assert service_networks_info[0]['Aliases'] == ['dockerpytest_1_alias']
+ assert service_networks_info[0]['DriverOpts'] == {'foo': 'bar'}
+
def test_create_service_with_placement(self):
node_id = self.client.nodes()[0]['ID']
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(
- container_spec, placement=['node.id=={}'.format(node_id)]
+ container_spec, placement=[f'node.id=={node_id}']
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
- {'Constraints': ['node.id=={}'.format(node_id)]})
+ {'Constraints': [f'node.id=={node_id}']})
def test_create_service_with_placement_object(self):
node_id = self.client.nodes()[0]['ID']
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(
- constraints=['node.id=={}'.format(node_id)]
+ constraints=[f'node.id=={node_id}']
)
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
@@ -366,7 +427,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.30')
def test_create_service_with_placement_platform(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
@@ -379,7 +440,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.27')
def test_create_service_with_placement_preferences(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(preferences=[
{'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
])
@@ -392,8 +453,36 @@ class ServiceTest(BaseAPIIntegrationTest):
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+ @requires_api_version('1.27')
+ def test_create_service_with_placement_preferences_tuple(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ placemt = docker.types.Placement(preferences=(
+ ('spread', 'com.dockerpy.test'),
+ ))
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.40')
+ def test_create_service_with_placement_maxreplicas(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ placemt = docker.types.Placement(maxreplicas=1)
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
def test_create_service_with_endpoint_spec(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -417,13 +506,13 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
- self.fail('Invalid port specification: {0}'.format(port))
+ self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@requires_api_version('1.32')
def test_create_service_with_endpoint_spec_host_publish_mode(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -443,7 +532,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_env(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
+ TEST_IMG, ['true'], env={'DOCKER_PY_TEST': 1}
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
@@ -459,7 +548,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.29')
def test_create_service_with_update_order(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, order='start-first'
@@ -478,7 +567,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_service_with_tty(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['true'], tty=True
+ TEST_IMG, ['true'], tty=True
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
@@ -495,7 +584,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_create_service_with_tty_dict(self):
container_spec = {
- 'Image': BUSYBOX,
+ 'Image': TEST_IMG,
'Command': ['true'],
'TTY': True
}
@@ -511,7 +600,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_global_mode(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -524,7 +613,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_replicated_mode(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['echo', 'hello']
+ TEST_IMG, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -579,14 +668,14 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /run/secrets/{0}'.format(secret_name)
+ container, f'cat /run/secrets/{secret_name}'
)
assert self.client.exec_start(exec_id) == secret_data
@requires_api_version('1.25')
def test_create_service_with_unicode_secret(self):
secret_name = 'favorite_touhou'
- secret_data = u'東方花映塚'
+ secret_data = '東方花映塚'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
@@ -604,7 +693,7 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /run/secrets/{0}'.format(secret_name)
+ container, f'cat /run/secrets/{secret_name}'
)
container_secret = self.client.exec_start(exec_id)
container_secret = container_secret.decode('utf-8')
@@ -631,14 +720,14 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /{0}'.format(config_name)
+ container, f'cat /{config_name}'
)
assert self.client.exec_start(exec_id) == config_data
@requires_api_version('1.30')
def test_create_service_with_unicode_config(self):
config_name = 'favorite_touhou'
- config_data = u'東方花映塚'
+ config_data = '東方花映塚'
config_id = self.client.create_config(config_name, config_data)
self.tmp_configs.append(config_id)
config_ref = docker.types.ConfigReference(config_id, config_name)
@@ -656,7 +745,7 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /{0}'.format(config_name)
+ container, f'cat /{config_name}'
)
container_config = self.client.exec_start(exec_id)
container_config = container_config.decode('utf-8')
@@ -717,7 +806,7 @@ class ServiceTest(BaseAPIIntegrationTest):
search=['local'], options=['debug']
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], dns_config=dns_config
+ TEST_IMG, ['sleep', '999'], dns_config=dns_config
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -737,7 +826,7 @@ class ServiceTest(BaseAPIIntegrationTest):
start_period=3 * second, interval=int(second / 2),
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], healthcheck=hc
+ TEST_IMG, ['sleep', '999'], healthcheck=hc
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -754,7 +843,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.28')
def test_create_service_with_readonly(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], read_only=True
+ TEST_IMG, ['sleep', '999'], read_only=True
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -768,7 +857,7 @@ class ServiceTest(BaseAPIIntegrationTest):
@requires_api_version('1.28')
def test_create_service_with_stop_signal(self):
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], stop_signal='SIGINT'
+ TEST_IMG, ['sleep', '999'], stop_signal='SIGINT'
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -786,7 +875,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def test_create_service_with_privileges(self):
priv = docker.types.Privileges(selinux_disable=True)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], privileges=priv
+ TEST_IMG, ['sleep', '999'], privileges=priv
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -800,6 +889,20 @@ class ServiceTest(BaseAPIIntegrationTest):
)
assert privileges['SELinuxContext']['Disable'] is True
+ @requires_api_version('1.38')
+ def test_create_service_with_init(self):
+ container_spec = docker.types.ContainerSpec(
+ TEST_IMG, ['sleep', '999'], init=True
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Init' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Init'] is True
+ )
+
@requires_api_version('1.25')
def test_update_service_with_defaults_name(self):
container_spec = docker.types.ContainerSpec(
@@ -928,7 +1031,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert labels['container.label'] == 'SampleLabel'
def test_update_service_with_defaults_update_config(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
@@ -967,7 +1070,7 @@ class ServiceTest(BaseAPIIntegrationTest):
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
@@ -1006,7 +1109,7 @@ class ServiceTest(BaseAPIIntegrationTest):
]
def test_update_service_with_defaults_endpoint_spec(self):
- container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -1031,7 +1134,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
- self.fail('Invalid port specification: {0}'.format(port))
+ self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@@ -1058,7 +1161,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
- self.fail('Invalid port specification: {0}'.format(port))
+ self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@@ -1070,7 +1173,7 @@ class ServiceTest(BaseAPIIntegrationTest):
start_period=3 * second, interval=int(second / 2),
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], healthcheck=hc
+ TEST_IMG, ['sleep', '999'], healthcheck=hc
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -1085,7 +1188,7 @@ class ServiceTest(BaseAPIIntegrationTest):
)
container_spec = docker.types.ContainerSpec(
- BUSYBOX, ['sleep', '999'], healthcheck={}
+ TEST_IMG, ['sleep', '999'], healthcheck={}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
@@ -1253,3 +1356,33 @@ class ServiceTest(BaseAPIIntegrationTest):
self.client.update_service(*args, **kwargs)
else:
raise
+
+ @requires_api_version('1.41')
+ def test_create_service_cap_add(self):
+ name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ TEST_IMG, ['echo', 'hello'], cap_add=['CAP_SYSLOG']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+ spec = services[0]['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'CAP_SYSLOG' in spec['CapabilityAdd']
+
+ @requires_api_version('1.41')
+ def test_create_service_cap_drop(self):
+ name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ TEST_IMG, ['echo', 'hello'], cap_drop=['CAP_SYSLOG']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+ spec = services[0]['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'CAP_SYSLOG' in spec['CapabilityDrop']
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
index dbf3786..48c0592 100644
--- a/tests/integration/api_swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -8,19 +8,18 @@ from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
- super(SwarmTest, self).setUp()
+ super().setUp()
force_leave_swarm(self.client)
self._unlock_key = None
def tearDown(self):
- super(SwarmTest, self).tearDown()
try:
if self._unlock_key:
self.client.unlock_swarm(self._unlock_key)
except docker.errors.APIError:
pass
-
force_leave_swarm(self.client)
+ super().tearDown()
@requires_api_version('1.24')
def test_init_swarm_simple(self):
@@ -36,6 +35,35 @@ class SwarmTest(BaseAPIIntegrationTest):
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_defaults(self):
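+ # With no arguments, the daemon falls back to the default address
+ # pool 10.0.0.0/8 with a /24 subnet size per network.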
+ assert self.init_swarm()
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
+ assert results['SubnetSize'] == 24
+
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_only_pool(self):
+ assert self.init_swarm(default_addr_pool=['2.0.0.0/16'])
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'2.0.0.0/16'}
+ assert results['SubnetSize'] == 24
+
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_only_subnet_size(self):
+ assert self.init_swarm(subnet_size=26)
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
+ assert results['SubnetSize'] == 26
+
+ @requires_api_version('1.39')
+ def test_init_swarm_custom_addr_pool_both_args(self):
+ assert self.init_swarm(default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],
+ subnet_size=28)
+ results = self.client.inspect_swarm()
+ assert set(results['DefaultAddrPool']) == {'2.0.0.0/16', '3.0.0.0/16'}
+ assert results['SubnetSize'] == 28
+
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
assert self.init_swarm()
@@ -158,12 +186,14 @@ class SwarmTest(BaseAPIIntegrationTest):
@requires_api_version('1.24')
def test_inspect_node(self):
- assert self.init_swarm()
+ node_id = self.init_swarm()
+ assert node_id
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
+ assert node_id == node['ID']
assert node['Version'] == node_data['Version']
@requires_api_version('1.24')
@@ -205,3 +235,21 @@ class SwarmTest(BaseAPIIntegrationTest):
self.client.remove_node(node_id, True)
assert e.value.response.status_code >= 400
+
+ @requires_api_version('1.25')
+ def test_rotate_manager_unlock_key(self):
+ spec = self.client.create_swarm_spec(autolock_managers=True)
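+ # autolock_managers makes the swarm generate an unlock key; rotating
+ # it must produce a key different from the original.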
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ key_1 = self.client.get_unlock_key()
+ assert self.client.update_swarm(
+ version=swarm_info['Version']['Index'],
+ rotate_manager_unlock_key=True
+ )
+ key_2 = self.client.get_unlock_key()
+ assert key_1['UnlockKey'] != key_2['UnlockKey']
+
+ @requires_api_version('1.30')
+ @pytest.mark.xfail(reason='Can fail if eth0 has multiple IP addresses')
+ def test_init_swarm_data_path_addr(self):
+ assert self.init_swarm(data_path_addr='eth0')
diff --git a/tests/integration/base.py b/tests/integration/base.py
index 56c23ed..031079c 100644
--- a/tests/integration/base.py
+++ b/tests/integration/base.py
@@ -3,11 +3,10 @@ import shutil
import unittest
import docker
-from docker.utils import kwargs_from_env
-
from .. import helpers
+from docker.utils import kwargs_from_env
-BUSYBOX = 'busybox:buildroot-2014.02'
+TEST_IMG = 'alpine:3.10'
TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
@@ -29,41 +28,44 @@ class BaseIntegrationTest(unittest.TestCase):
def tearDown(self):
client = docker.from_env(version=TEST_API_VERSION)
- for img in self.tmp_imgs:
- try:
- client.api.remove_image(img)
- except docker.errors.APIError:
- pass
- for container in self.tmp_containers:
- try:
- client.api.remove_container(container, force=True, v=True)
- except docker.errors.APIError:
- pass
- for network in self.tmp_networks:
- try:
- client.api.remove_network(network)
- except docker.errors.APIError:
- pass
- for volume in self.tmp_volumes:
- try:
- client.api.remove_volume(volume)
- except docker.errors.APIError:
- pass
-
- for secret in self.tmp_secrets:
- try:
- client.api.remove_secret(secret)
- except docker.errors.APIError:
- pass
-
- for config in self.tmp_configs:
- try:
- client.api.remove_config(config)
- except docker.errors.APIError:
- pass
-
- for folder in self.tmp_folders:
- shutil.rmtree(folder)
+ try:
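+ # try/finally guarantees the client is closed even if one of the
+ # cleanup steps below raises.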
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True, v=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+ finally:
+ client.close()
class BaseAPIIntegrationTest(BaseIntegrationTest):
@@ -73,11 +75,11 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
"""
def setUp(self):
- super(BaseAPIIntegrationTest, self).setUp()
+ super().setUp()
self.client = self.get_client_instance()
def tearDown(self):
- super(BaseAPIIntegrationTest, self).tearDown()
+ super().tearDown()
self.client.close()
@staticmethod
@@ -106,7 +108,7 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
return container
- def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
+ def create_and_start(self, image=TEST_IMG, command='top', **kwargs):
container = self.client.create_container(
image=image, command=command, **kwargs)
self.tmp_containers.append(container)
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 4e8d268..ae94595 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import sys
import warnings
@@ -7,7 +5,7 @@ import docker.errors
from docker.utils import kwargs_from_env
import pytest
-from .base import BUSYBOX
+from .base import TEST_IMG
@pytest.fixture(autouse=True, scope='session')
@@ -15,15 +13,15 @@ def setup_test_session():
warnings.simplefilter('error')
c = docker.APIClient(version='auto', **kwargs_from_env())
try:
- c.inspect_image(BUSYBOX)
+ c.inspect_image(TEST_IMG)
except docker.errors.NotFound:
- print("\npulling {0}".format(BUSYBOX), file=sys.stderr)
- for data in c.pull(BUSYBOX, stream=True, decode=True):
+ print(f"\npulling {TEST_IMG}", file=sys.stderr)
+ for data in c.pull(TEST_IMG, stream=True, decode=True):
status = data.get("status")
progress = data.get("progress")
- detail = "{0} - {1}".format(status, progress)
+ detail = f"{status} - {progress}"
print(detail, file=sys.stderr)
# Double make sure we now have busybox
- c.inspect_image(BUSYBOX)
+ c.inspect_image(TEST_IMG)
c.close()
diff --git a/tests/integration/context_api_test.py b/tests/integration/context_api_test.py
new file mode 100644
index 0000000..a2a12a5
--- /dev/null
+++ b/tests/integration/context_api_test.py
@@ -0,0 +1,59 @@
+import os
+import tempfile
+import pytest
+from docker import errors
+from docker.context import ContextAPI
+from docker.tls import TLSConfig
+from .base import BaseAPIIntegrationTest
+
+
+class ContextLifecycleTest(BaseAPIIntegrationTest):
+ def test_lifecycle(self):
+ assert ContextAPI.get_context().Name == "default"
+ assert not ContextAPI.get_context("test")
+ assert ContextAPI.get_current_context().Name == "default"
+
+ dirpath = tempfile.mkdtemp()
+ ca = tempfile.NamedTemporaryFile(
+ prefix=os.path.join(dirpath, "ca.pem"), mode="r")
+ cert = tempfile.NamedTemporaryFile(
+ prefix=os.path.join(dirpath, "cert.pem"), mode="r")
+ key = tempfile.NamedTemporaryFile(
+ prefix=os.path.join(dirpath, "key.pem"), mode="r")
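+ # The empty temp files stand in for real PEM material in this
+ # lifecycle test; only their paths are handed to TLSConfig.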
+
+ # create context 'test'
+ docker_tls = TLSConfig(
+ client_cert=(cert.name, key.name),
+ ca_cert=ca.name)
+ ContextAPI.create_context(
+ "test", tls_cfg=docker_tls)
+
+ # check for a context 'test' in the context store
+ assert any([ctx.Name == "test" for ctx in ContextAPI.contexts()])
+ # retrieve a context object for 'test'
+ assert ContextAPI.get_context("test")
+ # remove context
+ ContextAPI.remove_context("test")
+ with pytest.raises(errors.ContextNotFound):
+ ContextAPI.inspect_context("test")
+ # check there is no 'test' context in store
+ assert not ContextAPI.get_context("test")
+
+ ca.close()
+ key.close()
+ cert.close()
+
+ def test_context_remove(self):
+ ContextAPI.create_context("test")
+ assert ContextAPI.inspect_context("test")["Name"] == "test"
+
+ ContextAPI.remove_context("test")
+ with pytest.raises(errors.ContextNotFound):
+ ContextAPI.inspect_context("test")
+
+ def test_load_context_without_orchestrator(self):
+ ContextAPI.create_context("test")
+ ctx = ContextAPI.get_context("test")
+ assert ctx
+ assert ctx.Name == "test"
+ assert ctx.Orchestrator is None
diff --git a/tests/integration/credentials/__init__.py b/tests/integration/credentials/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/integration/credentials/__init__.py
diff --git a/tests/integration/credentials/store_test.py b/tests/integration/credentials/store_test.py
new file mode 100644
index 0000000..d0cfd54
--- /dev/null
+++ b/tests/integration/credentials/store_test.py
@@ -0,0 +1,86 @@
+import os
+import random
+import sys
+
+import pytest
+from distutils.spawn import find_executable
+
+from docker.credentials import (
+ CredentialsNotFound, Store, StoreError, DEFAULT_LINUX_STORE,
+ DEFAULT_OSX_STORE
+)
+
+
+class TestStore:
+ def teardown_method(self):
+ for server in self.tmp_keys:
+ try:
+ self.store.erase(server)
+ except StoreError:
+ pass
+
+ def setup_method(self):
+ self.tmp_keys = []
+ if sys.platform.startswith('linux'):
+ if find_executable('docker-credential-' + DEFAULT_LINUX_STORE):
+ self.store = Store(DEFAULT_LINUX_STORE)
+ elif find_executable('docker-credential-pass'):
+ self.store = Store('pass')
+ else:
+ raise Exception('No supported docker-credential store in PATH')
+ elif sys.platform.startswith('darwin'):
+ self.store = Store(DEFAULT_OSX_STORE)
+
+ def get_random_servername(self):
+ res = f'pycreds_test_{random.getrandbits(32):x}'
+ self.tmp_keys.append(res)
+ return res
+
+ def test_store_and_get(self):
+ key = self.get_random_servername()
+ self.store.store(server=key, username='user', secret='pass')
+ data = self.store.get(key)
+ assert data == {
+ 'ServerURL': key,
+ 'Username': 'user',
+ 'Secret': 'pass'
+ }
+
+ def test_get_nonexistent(self):
+ key = self.get_random_servername()
+ with pytest.raises(CredentialsNotFound):
+ self.store.get(key)
+
+ def test_store_and_erase(self):
+ key = self.get_random_servername()
+ self.store.store(server=key, username='user', secret='pass')
+ self.store.erase(key)
+ with pytest.raises(CredentialsNotFound):
+ self.store.get(key)
+
+ def test_unicode_strings(self):
+ key = self.get_random_servername()
+ self.store.store(server=key, username='user', secret='pass')
+ data = self.store.get(key)
+ assert data
+ self.store.erase(key)
+ with pytest.raises(CredentialsNotFound):
+ self.store.get(key)
+
+ def test_list(self):
+ names = (self.get_random_servername(), self.get_random_servername())
+ self.store.store(names[0], username='sakuya', secret='izayoi')
+ self.store.store(names[1], username='reimu', secret='hakurei')
+ data = self.store.list()
+ assert names[0] in data
+ assert data[names[0]] == 'sakuya'
+ assert names[1] in data
+ assert data[names[1]] == 'reimu'
+
+ def test_execute_with_env_override(self):
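+ # Repoint the store binary at `env` so _execute runs it with the
+ # injected environment; `env --null` prints NUL-separated entries.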
+ self.store.exe = 'env'
+ self.store.environment = {'FOO': 'bar'}
+ data = self.store._execute('--null', '')
+ assert b'\0FOO=bar\0' in data
+ assert 'FOO' not in os.environ
diff --git a/tests/integration/credentials/utils_test.py b/tests/integration/credentials/utils_test.py
new file mode 100644
index 0000000..d7b2a1a
--- /dev/null
+++ b/tests/integration/credentials/utils_test.py
@@ -0,0 +1,22 @@
+import os
+
+from docker.credentials.utils import create_environment_dict
+
+from unittest import mock
+
+
+@mock.patch.dict(os.environ)
+def test_create_environment_dict():
+ base = {'FOO': 'bar', 'BAZ': 'foobar'}
+ os.environ = base
+ assert create_environment_dict({'FOO': 'baz'}) == {
+ 'FOO': 'baz', 'BAZ': 'foobar',
+ }
+ assert create_environment_dict({'HELLO': 'world'}) == {
+ 'FOO': 'bar', 'BAZ': 'foobar', 'HELLO': 'world',
+ }
+
+ assert os.environ == base
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
index ac74d72..7bf156a 100644
--- a/tests/integration/errors_test.py
+++ b/tests/integration/errors_test.py
@@ -1,11 +1,11 @@
from docker.errors import APIError
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class ErrorsTest(BaseAPIIntegrationTest):
def test_api_error_parses_json(self):
- container = self.client.create_container(BUSYBOX, ['sleep', '10'])
+ container = self.client.create_container(TEST_IMG, ['sleep', '10'])
self.client.start(container['Id'])
with pytest.raises(APIError) as cm:
self.client.remove_container(container['Id'])
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
index ab41ea5..eac4c97 100644
--- a/tests/integration/models_containers_test.py
+++ b/tests/integration/models_containers_test.py
@@ -1,10 +1,14 @@
+import os
import tempfile
import threading
-import docker
import pytest
-from .base import BaseIntegrationTest, TEST_API_VERSION
-from ..helpers import random_name, requires_api_version
+
+import docker
+from ..helpers import random_name
+from ..helpers import requires_api_version
+from .base import BaseIntegrationTest
+from .base import TEST_API_VERSION
class ContainerCollectionTest(BaseIntegrationTest):
@@ -122,7 +126,9 @@ class ContainerCollectionTest(BaseIntegrationTest):
def test_run_with_auto_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
- 'alpine', 'echo hello', auto_remove=True
+ # sleep(2) to allow any communication with the container
+ # before it gets removed by the host.
+ 'alpine', 'sh -c "echo hello && sleep 2"', auto_remove=True
)
assert out == b'hello\n'
@@ -131,7 +137,10 @@ class ContainerCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.ContainerError) as e:
client.containers.run(
- 'alpine', 'sh -c ">&2 echo error && exit 1"', auto_remove=True
+ # sleep(2) to allow any communication with the container
+ # before it gets removed by the host.
+ 'alpine', 'sh -c ">&2 echo error && sleep 2 && exit 1"',
+ auto_remove=True
)
assert e.value.exit_status == 1
assert e.value.stderr is None
@@ -146,6 +155,8 @@ class ContainerCollectionTest(BaseIntegrationTest):
assert logs[1] == b'world\n'
@pytest.mark.timeout(5)
+ @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+ reason='No cancellable streams over SSH')
def test_run_with_streamed_logs_and_cancel(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
@@ -160,6 +171,17 @@ class ContainerCollectionTest(BaseIntegrationTest):
assert logs[0] == b'hello\n'
assert logs[1] == b'world\n'
+ def test_run_with_proxy_config(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.api._proxy_configs = docker.utils.proxy.ProxyConfig(
+ ftp='sakuya.jp:4967'
+ )
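+ # The client injects configured proxies into the container's
+ # environment in both upper- and lower-case forms.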
+
+ out = client.containers.run('alpine', 'sh -c "env"')
+
+ assert b'FTP_PROXY=sakuya.jp:4967\n' in out
+ assert b'ftp_proxy=sakuya.jp:4967\n' in out
+
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
@@ -325,6 +347,66 @@ class ContainerTest(BaseIntegrationTest):
'memory_stats', 'blkio_stats']:
assert key in stats
+ def test_ports_target_none(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ ports = None
+ target_ports = {'2222/tcp': ports}
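+ # A None host value asks the daemon to publish 2222/tcp on a
+ # randomly assigned host port.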
+ container = client.containers.run(
+ "alpine", "sleep 100", detach=True,
+ ports=target_ports
+ )
+ self.tmp_containers.append(container.id)
+ container.reload() # required to get auto-assigned ports
+ actual_ports = container.ports
+ assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+ for target_client, target_host in target_ports.items():
+ for actual_port in actual_ports[target_client]:
+ actual_keys = sorted(actual_port.keys())
+ assert sorted(['HostIp', 'HostPort']) == actual_keys
+ assert target_host is ports
+ assert int(actual_port['HostPort']) > 0
+ client.close()
+
+ def test_ports_target_tuple(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ ports = ('127.0.0.1', 1111)
+ target_ports = {'2222/tcp': ports}
+ container = client.containers.run(
+ "alpine", "sleep 100", detach=True,
+ ports=target_ports
+ )
+ self.tmp_containers.append(container.id)
+ container.reload() # required to get auto-assigned ports
+ actual_ports = container.ports
+ assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+ for target_client, target_host in target_ports.items():
+ for actual_port in actual_ports[target_client]:
+ actual_keys = sorted(actual_port.keys())
+ assert sorted(['HostIp', 'HostPort']) == actual_keys
+ assert target_host == ports
+ assert int(actual_port['HostPort']) > 0
+ client.close()
+
+ def test_ports_target_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ ports = [1234, 4567]
+ target_ports = {'2222/tcp': ports}
+ container = client.containers.run(
+ "alpine", "sleep 100", detach=True,
+ ports=target_ports
+ )
+ self.tmp_containers.append(container.id)
+ container.reload() # required to get auto-assigned ports
+ actual_ports = container.ports
+ assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
+ for target_client, target_host in target_ports.items():
+ for actual_port in actual_ports[target_client]:
+ actual_keys = sorted(actual_port.keys())
+ assert sorted(['HostIp', 'HostPort']) == actual_keys
+ assert target_host == ports
+ assert int(actual_port['HostPort']) > 0
+ client.close()
+
def test_stop(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "top", detach=True)
@@ -362,3 +444,13 @@ class ContainerTest(BaseIntegrationTest):
detach=True)
self.tmp_containers.append(container.id)
assert container.wait()['StatusCode'] == 1
+
+ def test_create_with_volume_driver(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.create(
+ 'alpine',
+ 'sleep 300',
+ volume_driver='foo'
+ )
+ self.tmp_containers.append(container.id)
+ assert container.attrs['HostConfig']['VolumeDriver'] == 'foo'
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
index ae735ba..94aa201 100644
--- a/tests/integration/models_images_test.py
+++ b/tests/integration/models_images_test.py
@@ -4,7 +4,8 @@ import tempfile
import docker
import pytest
-from .base import BaseIntegrationTest, BUSYBOX, TEST_API_VERSION
+from .base import BaseIntegrationTest, TEST_IMG, TEST_API_VERSION
+from ..helpers import random_name
class ImageCollectionTest(BaseIntegrationTest):
@@ -12,8 +13,8 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_build(self):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(fileobj=io.BytesIO(
- "FROM alpine\n"
- "CMD echo hello world".encode('ascii')
+ b"FROM alpine\n"
+ b"CMD echo hello world"
))
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"hello world\n"
@@ -23,8 +24,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.BuildError) as cm:
client.images.build(fileobj=io.BytesIO(
- "FROM alpine\n"
- "RUN exit 1".encode('ascii')
+ b"FROM alpine\n"
+ b"RUN exit 1"
))
assert (
"The command '/bin/sh -c exit 1' returned a non-zero code: 1"
@@ -35,8 +36,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(
tag='some-tag', fileobj=io.BytesIO(
- "FROM alpine\n"
- "CMD echo hello world".encode('ascii')
+ b"FROM alpine\n"
+ b"CMD echo hello world"
)
)
self.tmp_imgs.append(image.id)
@@ -46,8 +47,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(
tag='dup-txt-tag', fileobj=io.BytesIO(
- "FROM alpine\n"
- "CMD echo Successfully built abcd1234".encode('ascii')
+ b"FROM alpine\n"
+ b"CMD echo Successfully built abcd1234"
)
)
self.tmp_imgs.append(image.id)
@@ -71,8 +72,8 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_pull_with_tag(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.pull('alpine', tag='3.3')
- assert 'alpine:3.3' in image.attrs['RepoTags']
+ image = client.images.pull('alpine', tag='3.10')
+ assert 'alpine:3.10' in image.attrs['RepoTags']
def test_pull_with_sha(self):
image_ref = (
@@ -85,9 +86,11 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_pull_multiple(self):
client = docker.from_env(version=TEST_API_VERSION)
- images = client.images.pull('hello-world')
- assert len(images) == 1
- assert 'hello-world:latest' in images[0].attrs['RepoTags']
+ images = client.images.pull('hello-world', all_tags=True)
+ assert len(images) >= 1
+ assert any([
+ 'hello-world:latest' in img.attrs['RepoTags'] for img in images
+ ])
def test_load_error(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -96,7 +99,7 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_save_and_load(self):
client = docker.from_env(version=TEST_API_VERSION)
- image = client.images.get(BUSYBOX)
+ image = client.images.get(TEST_IMG)
with tempfile.TemporaryFile() as f:
stream = image.save()
for chunk in stream:
@@ -108,13 +111,39 @@ class ImageCollectionTest(BaseIntegrationTest):
assert len(result) == 1
assert result[0].id == image.id
+ def test_save_and_load_repo_name(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(TEST_IMG)
+ additional_tag = random_name()
+ image.tag(additional_tag)
+ self.tmp_imgs.append(additional_tag)
+ image.reload()
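+ # Saving with named=<repo:tag> records that reference in the image
+ # tarball, so the tag survives the remove/load round-trip below.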
+ with tempfile.TemporaryFile() as f:
+ stream = image.save(named=f'{additional_tag}:latest')
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ client.images.remove(additional_tag, force=True)
+ result = client.images.load(f.read())
+
+ assert len(result) == 1
+ assert result[0].id == image.id
+ assert f'{additional_tag}:latest' in result[0].tags
+
+ def test_save_name_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(TEST_IMG)
+ with pytest.raises(docker.errors.InvalidArgument):
+ image.save(named='sakuya/izayoi')
+
class ImageTest(BaseIntegrationTest):
def test_tag_and_remove(self):
repo = 'dockersdk.tests.images.test_tag'
tag = 'some-tag'
- identifier = '{}:{}'.format(repo, tag)
+ identifier = f'{repo}:{tag}'
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
index 36caa85..982842b 100644
--- a/tests/integration/models_services_test.py
+++ b/tests/integration/models_services_test.py
@@ -333,3 +333,41 @@ class ServiceTest(unittest.TestCase):
assert service.force_update()
service.reload()
assert service.version > initial_version
+
+ @helpers.requires_api_version('1.41')
+ def test_create_cap_add(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ labels={'foo': 'bar'},
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'},
+ cap_add=["CAP_SYSLOG"]
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+ assert "CAP_SYSLOG" in container_spec["CapabilityAdd"]
+
+ @helpers.requires_api_version('1.41')
+ def test_create_cap_drop(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ labels={'foo': 'bar'},
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'},
+ cap_drop=["CAP_SYSLOG"]
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+ assert "CAP_SYSLOG" in container_spec["CapabilityDrop"]
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
index f39f0d3..6c1836d 100644
--- a/tests/integration/models_swarm_test.py
+++ b/tests/integration/models_swarm_test.py
@@ -31,3 +31,15 @@ class SwarmTest(unittest.TestCase):
cm.value.response.status_code == 406 or
cm.value.response.status_code == 503
)
+
+ def test_join_on_already_joined_swarm(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init()
+ join_token = client.swarm.attrs['JoinTokens']['Manager']
+ with pytest.raises(docker.errors.APIError) as cm:
+ client.swarm.join(
+ remote_addrs=['127.0.0.1'],
+ join_token=join_token,
+ )
+ assert cm.value.response.status_code == 503
+ assert 'This node is already part of a swarm.' in cm.value.explanation
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index 0fd4e43..deb9aff 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -2,9 +2,8 @@ import io
import random
import docker
-import six
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
@@ -14,12 +13,12 @@ class TestRegressions(BaseAPIIntegrationTest):
with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
- assert exc.value.response.status_code == 500
+ assert exc.value.is_error()
dfile.close()
def test_542_truncate_ids_client_side(self):
self.client.start(
- self.client.create_container(BUSYBOX, ['true'])
+ self.client.create_container(TEST_IMG, ['true'])
)
result = self.client.containers(all=True, trunc=True)
assert len(result[0]['Id']) == 12
@@ -30,24 +29,23 @@ class TestRegressions(BaseAPIIntegrationTest):
def test_649_handle_timeout_value_none(self):
self.client.timeout = None
- ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
+ ctnr = self.client.create_container(TEST_IMG, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715_handle_user_param_as_int_value(self):
- ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
+ ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
- BUSYBOX, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
+ TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
@@ -56,10 +54,10 @@ class TestRegressions(BaseAPIIntegrationTest):
self.client.start(ctnr)
assert self.client.port(
ctnr, 2000
- )[0]['HostPort'] == six.text_type(tcp_port)
+ )[0]['HostPort'] == str(tcp_port)
assert self.client.port(
ctnr, '2000/tcp'
- )[0]['HostPort'] == six.text_type(tcp_port)
+ )[0]['HostPort'] == str(tcp_port)
assert self.client.port(
ctnr, '2000/udp'
- )[0]['HostPort'] == six.text_type(udp_port)
+ )[0]['HostPort'] == str(udp_port)
diff --git a/tests/ssh/__init__.py b/tests/ssh/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/ssh/__init__.py
diff --git a/tests/ssh/api_build_test.py b/tests/ssh/api_build_test.py
new file mode 100644
index 0000000..ef48e12
--- /dev/null
+++ b/tests/ssh/api_build_test.py
@@ -0,0 +1,590 @@
+import io
+import os
+import shutil
+import tempfile
+
+from docker import errors
+from docker.utils.proxy import ProxyConfig
+
+import pytest
+
+from .base import BaseAPIIntegrationTest, TEST_IMG
+from ..helpers import random_name, requires_api_version, requires_experimental
+
+
+class BuildTest(BaseAPIIntegrationTest):
+ def test_build_with_proxy(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
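+ # Each RUN step below greps for the proxy variables the client is
+ # expected to inject into the build environment.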
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=a"',
+ 'RUN env | grep "ftp_proxy=a"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(fileobj=script, decode=True)
+
+ def test_build_with_proxy_and_buildargs(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=XXX"',
+ 'RUN env | grep "ftp_proxy=xxx"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(
+ fileobj=script,
+ decode=True,
+ buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'}
+ )
+
+ def test_build_streaming(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+ stream = self.client.build(fileobj=script, decode=True)
+ logs = []
+ for chunk in stream:
+ logs.append(chunk)
+ assert len(logs) > 0
+
+ def test_build_from_stringio(self):
+ # Intentionally disabled: the build context must be a binary
+ # stream, so this io.StringIO variant is a no-op kept for
+ # reference.
+ return
+ script = io.StringIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]))
+ stream = self.client.build(fileobj=script)
+ logs = ''
+ for chunk in stream:
+ chunk = chunk.decode('utf-8')
+ logs += chunk
+ assert logs != ''
+
+ def test_build_with_dockerignore(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write("\n".join([
+ 'ignored',
+ 'Dockerfile',
+ '.dockerignore',
+ '!ignored/subdir/excepted-file',
+ '', # empty line
+ '#*', # comment line
+ ]))
+
+ with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
+ f.write('this file should not be ignored')
+
+ subdir = os.path.join(base_dir, 'ignored', 'subdir')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'file'), 'w') as f:
+ f.write("this file should be ignored")
+
+ with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ tag = 'docker-py-test-build-with-dockerignore'
+ stream = self.client.build(
+ path=base_dir,
+ tag=tag,
+ )
+ for chunk in stream:
+ pass
+
+ c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
+ self.client.start(c)
+ self.client.wait(c)
+ logs = self.client.logs(c)
+
+ logs = logs.decode('utf-8')
+
+ assert sorted(list(filter(None, logs.split('\n')))) == sorted([
+ '/test/#file.txt',
+ '/test/ignored/subdir/excepted-file',
+ '/test/not-ignored'
+ ])
+
+ def test_build_with_buildargs(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'ARG test',
+ 'USER $test'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
+ )
+ self.tmp_imgs.append('buildargs')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('buildargs')
+ assert info['Config']['User'] == 'OK'
+
+ @requires_api_version('1.22')
+ def test_build_shmsize(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Hello, World!\'"',
+ ]).encode('ascii'))
+
+ tag = 'shmsize'
+ shmsize = 134217728
+
+ stream = self.client.build(
+ fileobj=script, tag=tag, shmsize=shmsize
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ # There is currently no way to get the shmsize
+ # that was used to build the image
+
+ @requires_api_version('1.24')
+ def test_build_isolation(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Deaf To All But The Song\''
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='isolation',
+ isolation='default'
+ )
+
+ for chunk in stream:
+ pass
+
+ @requires_api_version('1.23')
+ def test_build_labels(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ ]).encode('ascii'))
+
+ labels = {'test': 'OK'}
+
+ stream = self.client.build(
+ fileobj=script, tag='labels', labels=labels
+ )
+ self.tmp_imgs.append('labels')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('labels')
+ assert info['Config']['Labels'] == labels
+
+ @requires_api_version('1.25')
+ def test_build_with_cache_from(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'ENV FOO=bar',
+ 'RUN touch baz',
+ 'RUN touch bax',
+ ]).encode('ascii'))
+
+ stream = self.client.build(fileobj=script, tag='build1')
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['build1'],
+ decode=True
+ )
+ self.tmp_imgs.append('build2')
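+ # All three instructions after FROM (ENV and both RUN steps) should
+ # be satisfied from build1's cache.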
+ counter = 0
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 3
+ self.client.remove_image('build2')
+
+ counter = 0
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['nosuchtag'],
+ decode=True
+ )
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 0
+
+ @requires_api_version('1.29')
+ def test_build_container_with_target(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox as first',
+ 'RUN mkdir -p /tmp/test',
+ 'RUN touch /tmp/silence.tar.gz',
+ 'FROM alpine:latest',
+ 'WORKDIR /root/',
+ 'COPY --from=first /tmp/silence.tar.gz .',
+ 'ONBUILD RUN echo "This should not be in the final image"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, target='first', tag='build1'
+ )
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('build1')
+ assert not info['Config']['OnBuild']
+
+ @requires_api_version('1.25')
+ def test_build_with_network_mode(self):
+ # Set up pingable endpoint on custom network
+ network = self.client.create_network(random_name())['Id']
+ self.tmp_networks.append(network)
+ container = self.client.create_container(TEST_IMG, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.connect_container_to_network(
+ container, network, aliases=['pingtarget.docker']
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 pingtarget.docker'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, network_mode=network,
+ tag='dockerpytest_customnetbuild'
+ )
+
+ self.tmp_imgs.append('dockerpytest_customnetbuild')
+ for chunk in stream:
+ pass
+
+ assert self.client.inspect_image('dockerpytest_customnetbuild')
+
+ script.seek(0)
+ stream = self.client.build(
+ fileobj=script, network_mode='none',
+ tag='dockerpytest_nonebuild', nocache=True, decode=True
+ )
+
+ self.tmp_imgs.append('dockerpytest_nonebuild')
+ logs = [chunk for chunk in stream]
+ assert 'errorDetail' in logs[-1]
+ assert logs[-1]['errorDetail']['code'] == 1
+
+ with pytest.raises(errors.NotFound):
+ self.client.inspect_image('dockerpytest_nonebuild')
+
+ @requires_api_version('1.27')
+ def test_build_with_extra_hosts(self):
+ img_name = 'dockerpytest_extrahost_build'
+ self.tmp_imgs.append(img_name)
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 hello.world.test',
+ 'RUN ping -c1 extrahost.local.test',
+ 'RUN cp /etc/hosts /hosts-file'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag=img_name,
+ extra_hosts={
+ 'extrahost.local.test': '127.0.0.1',
+ 'hello.world.test': '127.0.0.1',
+ }, decode=True
+ )
+ for chunk in stream:
+ if 'errorDetail' in chunk:
+ pytest.fail(chunk)
+
+ assert self.client.inspect_image(img_name)
+ ctnr = self.run_container(img_name, 'cat /hosts-file')
+ logs = self.client.logs(ctnr)
+ logs = logs.decode('utf-8')
+ assert '127.0.0.1\textrahost.local.test' in logs
+ assert '127.0.0.1\thello.world.test' in logs
+
+ @requires_experimental(until=None)
+ @requires_api_version('1.25')
+ def test_build_squash(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN echo blah > /file_1',
+ 'RUN echo blahblah > /file_2',
+ 'RUN echo blahblahblah > /file_3'
+ ]).encode('ascii'))
+
+ def build_squashed(squash):
+ tag = 'squash' if squash else 'nosquash'
+ stream = self.client.build(
+ fileobj=script, tag=tag, squash=squash
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ return self.client.inspect_image(tag)
+
+ non_squashed = build_squashed(False)
+ squashed = build_squashed(True)
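+ # busybox contributes one layer and the three RUN steps add three
+ # more; squashing collapses those three into a single layer.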
+ assert len(non_squashed['RootFS']['Layers']) == 4
+ assert len(squashed['RootFS']['Layers']) == 2
+
+ def test_build_stderr_data(self):
+ control_chars = ['\x1b[91m', '\x1b[0m']
+ snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
+ script = io.BytesIO(b'\n'.join([
+ b'FROM busybox',
+ f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
+ ]))
+
+ stream = self.client.build(
+ fileobj=script, decode=True, nocache=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk.get('stream'))
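+ # The daemon wraps stderr output in red ANSI escapes, so the
+ # expected line is '\x1b[91m' + snippet + '\n' + '\x1b[0m'.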
+ expected = '{0}{2}\n{1}'.format(
+ control_chars[0], control_chars[1], snippet
+ )
+ assert any([line == expected for line in lines])
+
+ def test_build_gzip_encoding(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True,
+ gzip=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_with_dockerfile_empty_lines(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('\n'.join([
+ ' ',
+ '',
+ '\t\t',
+ '\t ',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_gzip_custom_encoding(self):
+ with pytest.raises(errors.DockerException):
+ self.client.build(path='.', gzip=True, encoding='text/html')
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_build_invalid_platform(self):
+ script = io.BytesIO(b'FROM busybox\n')
+
+ with pytest.raises(errors.APIError) as excinfo:
+ stream = self.client.build(fileobj=script, platform='foobar')
+ for _ in stream:
+ pass
+
+ # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
+
+ def test_build_out_of_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('.dockerignore\n')
+ df_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, df_dir)
+ df_name = os.path.join(df_dir, 'Dockerfile')
+ with open(df_name, 'wb') as df:
+ df.write(('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ])).encode('utf-8'))
+ df.flush()
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=df_name, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 3
+ assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
+
+ def test_build_in_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
+
+ def test_build_in_context_nested_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ subdir = os.path.join(base_dir, 'hello', 'world')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='hello/world/custom.dockerfile',
+ tag=img_name, decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'hello']
+ ) == sorted(lsdata)
+
+ def test_build_in_context_abs_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile')
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(abs_dockerfile_path, 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
+
+ @requires_api_version('1.31')
+ @pytest.mark.xfail(
+ True,
+ reason='Currently fails on 18.09: '
+ 'https://github.com/moby/moby/issues/37920'
+ )
+ def test_prune_builds(self):
+ prune_result = self.client.prune_builds()
+ assert 'SpaceReclaimed' in prune_result
+ assert isinstance(prune_result['SpaceReclaimed'], int)
diff --git a/tests/ssh/base.py b/tests/ssh/base.py
new file mode 100644
index 0000000..4825227
--- /dev/null
+++ b/tests/ssh/base.py
@@ -0,0 +1,130 @@
+import os
+import shutil
+import unittest
+
+import docker
+from .. import helpers
+from docker.utils import kwargs_from_env
+
+TEST_IMG = 'alpine:3.10'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
+
+
+class BaseIntegrationTest(unittest.TestCase):
+ """
+ A base class for integration test cases. It cleans up the Docker server
+ after itself.
+ """
+
+ def setUp(self):
+ self.tmp_imgs = []
+ self.tmp_containers = []
+ self.tmp_folders = []
+ self.tmp_volumes = []
+ self.tmp_networks = []
+ self.tmp_plugins = []
+ self.tmp_secrets = []
+ self.tmp_configs = []
+
+ def tearDown(self):
+ client = docker.from_env(version=TEST_API_VERSION, use_ssh_client=True)
+ try:
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True, v=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+ finally:
+ client.close()
+
+
+class BaseAPIIntegrationTest(BaseIntegrationTest):
+ """
+ A test case for `APIClient` integration tests. It sets up an `APIClient`
+ as `self.client`.
+ """
+ @classmethod
+ def setUpClass(cls):
+ cls.client = cls.get_client_instance()
+ cls.client.pull(TEST_IMG)
+
+ def tearDown(self):
+ super().tearDown()
+ self.client.close()
+
+ @staticmethod
+ def get_client_instance():
+ return docker.APIClient(
+ version=TEST_API_VERSION,
+ timeout=60,
+ use_ssh_client=True,
+ **kwargs_from_env()
+ )
+
+ @staticmethod
+ def _init_swarm(client, **kwargs):
+ return client.init_swarm(
+ '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
+ )
+
+ def run_container(self, *args, **kwargs):
+ container = self.client.create_container(*args, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+
+ if exitcode != 0:
+ output = self.client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}"
+ .format(exitcode, output))
+
+ return container
+
+ def create_and_start(self, image=TEST_IMG, command='top', **kwargs):
+ container = self.client.create_container(
+ image=image, command=command, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ return container
+
+ def execute(self, container, cmd, exit_code=0, **kwargs):
+ exc = self.client.exec_create(container, cmd, **kwargs)
+ output = self.client.exec_start(exc)
+ actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
+ msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
+ " ".join(cmd), exit_code, actual_exit_code, output)
+ assert actual_exit_code == exit_code, msg
+
+ def init_swarm(self, **kwargs):
+ return self._init_swarm(self.client, **kwargs)
diff --git a/tests/unit/api_build_test.py b/tests/unit/api_build_test.py
index a7f34fd..7e07a26 100644
--- a/tests/unit/api_build_test.py
+++ b/tests/unit/api_build_test.py
@@ -1,12 +1,16 @@
import gzip
import io
+import shutil
import docker
from docker import auth
+from docker.api.build import process_dockerfile
-from .api_test import BaseAPIClientTest, fake_request, url_prefix
import pytest
+from ..helpers import make_tree
+from .api_test import BaseAPIClientTest, fake_request, url_prefix
+
class BuildTest(BaseAPIClientTest):
def test_build_container(self):
@@ -61,7 +65,7 @@ class BuildTest(BaseAPIClientTest):
)
def test_build_remote_with_registry_auth(self):
- self.client._auth_configs = {
+ self.client._auth_configs = auth.AuthConfig({
'auths': {
'https://example.com': {
'user': 'example',
@@ -69,7 +73,7 @@ class BuildTest(BaseAPIClientTest):
'email': 'example@example.com'
}
}
- }
+ })
expected_params = {'t': None, 'q': False, 'dockerfile': None,
'rm': False, 'nocache': False, 'pull': False,
@@ -77,7 +81,7 @@ class BuildTest(BaseAPIClientTest):
'remote': 'https://github.com/docker-library/mongo'}
expected_headers = {
'X-Registry-Config': auth.encode_header(
- self.client._auth_configs['auths']
+ self.client._auth_configs.auths
)
}
@@ -111,7 +115,7 @@ class BuildTest(BaseAPIClientTest):
})
def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
- self.client._auth_configs = {
+ self.client._auth_configs = auth.AuthConfig({
'auths': {
'https://example.com': {
'user': 'example',
@@ -119,12 +123,12 @@ class BuildTest(BaseAPIClientTest):
'email': 'example@example.com'
}
}
- }
+ })
headers = {}
expected_headers = {
'X-Registry-Config': auth.encode_header(
- self.client._auth_configs['auths']
+ self.client._auth_configs.auths
)
}
@@ -132,7 +136,7 @@ class BuildTest(BaseAPIClientTest):
assert headers == expected_headers
def test_set_auth_headers_with_dict_and_auth_configs(self):
- self.client._auth_configs = {
+ self.client._auth_configs = auth.AuthConfig({
'auths': {
'https://example.com': {
'user': 'example',
@@ -140,12 +144,12 @@ class BuildTest(BaseAPIClientTest):
'email': 'example@example.com'
}
}
- }
+ })
headers = {'foo': 'bar'}
expected_headers = {
'X-Registry-Config': auth.encode_header(
- self.client._auth_configs['auths']
+ self.client._auth_configs.auths
),
'foo': 'bar'
}
@@ -161,3 +165,61 @@ class BuildTest(BaseAPIClientTest):
self.client._set_auth_headers(headers)
assert headers == expected_headers
+
+ @pytest.mark.skipif(
+ not docker.constants.IS_WINDOWS_PLATFORM,
+ reason='Windows-specific syntax')
+ def test_process_dockerfile_win_longpath_prefix(self):
+ dirs = [
+ 'foo', 'foo/bar', 'baz',
+ ]
+
+ files = [
+ 'Dockerfile', 'foo/Dockerfile.foo', 'foo/bar/Dockerfile.bar',
+ 'baz/Dockerfile.baz',
+ ]
+
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+
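+ # WINDOWS_LONGPATH_PREFIX ('\\?\') lifts the MAX_PATH limit;
+ # process_dockerfile should treat prefixed paths like plain ones.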
+ def pre(path):
+ return docker.constants.WINDOWS_LONGPATH_PREFIX + path
+
+ assert process_dockerfile(None, pre(base)) == (None, None)
+ assert process_dockerfile('Dockerfile', pre(base)) == (
+ 'Dockerfile', None
+ )
+ assert process_dockerfile('foo/Dockerfile.foo', pre(base)) == (
+ 'foo/Dockerfile.foo', None
+ )
+ assert process_dockerfile(
+ '../Dockerfile', pre(base + '\\foo')
+ )[1] is not None
+ assert process_dockerfile(
+ '../baz/Dockerfile.baz', pre(base + '/baz')
+ ) == ('../baz/Dockerfile.baz', None)
+
+ def test_process_dockerfile(self):
+ dirs = [
+ 'foo', 'foo/bar', 'baz',
+ ]
+
+ files = [
+ 'Dockerfile', 'foo/Dockerfile.foo', 'foo/bar/Dockerfile.bar',
+ 'baz/Dockerfile.baz',
+ ]
+
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+
+ assert process_dockerfile(None, base) == (None, None)
+ assert process_dockerfile('Dockerfile', base) == ('Dockerfile', None)
+ assert process_dockerfile('foo/Dockerfile.foo', base) == (
+ 'foo/Dockerfile.foo', None
+ )
+ assert process_dockerfile(
+ '../Dockerfile', base + '/foo'
+ )[1] is not None
+ assert process_dockerfile('../baz/Dockerfile.baz', base + '/baz') == (
+ '../baz/Dockerfile.baz', None
+ )
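
The assertions above pin down the contract of docker.utils.build.process_dockerfile: a Dockerfile that lives inside the build context keeps its relative path and needs no override (second tuple element None), while one that resolves outside the context has to be read and injected into the build, so the second element is non-None; the Windows variant additionally checks that the \\?\ long-path prefix on the context root does not break that comparison. A minimal sketch of the rule under those assumptions, not the real implementation:

    import os

    def resolve_dockerfile(dockerfile, context_root):
        # Inside the context: keep the relative path, inject nothing.
        # Outside the context: the caller must inject the file contents,
        # so the second element is non-None.
        if dockerfile is None:
            return (None, None)
        root = os.path.abspath(context_root)
        target = os.path.normpath(os.path.join(root, dockerfile))
        if target.startswith(root + os.sep):
            return (dockerfile, None)
        return ('.dockerfile.injected', open(target).read())
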
diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py
index a7e183c..1ebd37d 100644
--- a/tests/unit/api_container_test.py
+++ b/tests/unit/api_container_test.py
@@ -1,24 +1,22 @@
-# -*- coding: utf-8 -*-
-
import datetime
import json
import signal
import docker
+from docker.api import APIClient
import pytest
-import six
from . import fake_api
from ..helpers import requires_api_version
from .api_test import (
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
- fake_inspect_container
+ fake_inspect_container, url_base
)
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
def fake_inspect_container_tty(self, container):
@@ -767,10 +765,71 @@ class CreateContainerTest(BaseAPIClientTest):
assert args[1]['headers'] == {'Content-Type': 'application/json'}
assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+ def test_create_container_with_device_requests(self):
+ client = APIClient(version='1.40')
+ fake_api.fake_responses.setdefault(
+ f'{fake_api.prefix}/v1.40/containers/create',
+ fake_api.post_fake_create_container,
+ )
+ client.create_container(
+ 'busybox', 'true', host_config=client.create_host_config(
+ device_requests=[
+ {
+ 'device_ids': [
+ '0',
+ 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ ]
+ },
+ {
+ 'driver': 'nvidia',
+ 'Count': -1,
+ 'capabilities': [
+ ['gpu', 'utility']
+ ],
+ 'options': {
+ 'key': 'value'
+ }
+ }
+ ]
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_base + 'v1.40/' + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = client.create_host_config()
+ expected_payload['HostConfig']['DeviceRequests'] = [
+ {
+ 'Driver': '',
+ 'Count': 0,
+ 'DeviceIDs': [
+ '0',
+ 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ ],
+ 'Capabilities': [],
+ 'Options': {}
+ },
+ {
+ 'Driver': 'nvidia',
+ 'Count': -1,
+ 'DeviceIDs': [],
+ 'Capabilities': [
+ ['gpu', 'utility']
+ ],
+ 'Options': {
+ 'key': 'value'
+ }
+ }
+ ]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+ assert set(args[1]['headers']) <= {'Content-Type', 'User-Agent'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
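
The expected payload above shows the normalization performed on device requests: lowercase snake_case keys become the CamelCase keys the Engine API expects, and unset fields are filled with zero values (empty driver string, count 0, empty id and capability lists, empty options). A hedged usage sketch via the public type, assuming docker.types.DeviceRequest as added in this release:

    from docker.types import DeviceRequest

    # A dict subclass keyed the way the Engine API expects.
    req = DeviceRequest(driver='nvidia', count=-1,
                        capabilities=[['gpu', 'utility']],
                        options={'key': 'value'})
    assert req['Driver'] == 'nvidia' and req['Count'] == -1
    assert req['DeviceIDs'] == []  # unset fields default to zero values
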
def test_create_container_with_labels_dict(self):
labels_dict = {
- six.text_type('foo'): six.text_type('1'),
- six.text_type('bar'): six.text_type('2'),
+ 'foo': '1',
+ 'bar': '2',
}
self.client.create_container(
@@ -786,12 +845,12 @@ class CreateContainerTest(BaseAPIClientTest):
def test_create_container_with_labels_list(self):
labels_list = [
- six.text_type('foo'),
- six.text_type('bar'),
+ 'foo',
+ 'bar',
]
labels_dict = {
- six.text_type('foo'): six.text_type(),
- six.text_type('bar'): six.text_type(),
+ 'foo': '',
+ 'bar': '',
}
self.client.create_container(
@@ -951,11 +1010,11 @@ class CreateContainerTest(BaseAPIClientTest):
def test_create_container_with_unicode_envvars(self):
envvars_dict = {
- 'foo': u'☃',
+ 'foo': '☃',
}
expected = [
- u'foo=☃'
+ 'foo=☃'
]
self.client.create_container(
@@ -1076,7 +1135,7 @@ class ContainerTest(BaseAPIClientTest):
stream=False
)
- assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
+ assert logs == b'Flowering Nights\n(Sakuya Iyazoi)\n'
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
@@ -1092,7 +1151,7 @@ class ContainerTest(BaseAPIClientTest):
stream=False
)
- assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
+ assert logs == b'Flowering Nights\n(Sakuya Iyazoi)\n'
def test_log_streaming(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
diff --git a/tests/unit/api_exec_test.py b/tests/unit/api_exec_test.py
index a9d2dd5..4504250 100644
--- a/tests/unit/api_exec_test.py
+++ b/tests/unit/api_exec_test.py
@@ -11,7 +11,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
args = fake_request.call_args
- assert 'POST' == args[0][0], url_prefix + 'containers/{0}/exec'.format(
+ assert 'POST' == args[0][0], url_prefix + 'containers/{}/exec'.format(
fake_api.FAKE_CONTAINER_ID
)
@@ -32,7 +32,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_start(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
- assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ assert args[0][1] == url_prefix + 'exec/{}/start'.format(
fake_api.FAKE_EXEC_ID
)
@@ -51,7 +51,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
args = fake_request.call_args
- assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ assert args[0][1] == url_prefix + 'exec/{}/start'.format(
fake_api.FAKE_EXEC_ID
)
@@ -68,7 +68,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
- assert args[0][1] == url_prefix + 'exec/{0}/json'.format(
+ assert args[0][1] == url_prefix + 'exec/{}/json'.format(
fake_api.FAKE_EXEC_ID
)
@@ -77,7 +77,7 @@ class ExecTest(BaseAPIClientTest):
fake_request.assert_called_with(
'POST',
- url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
+ url_prefix + f'exec/{fake_api.FAKE_EXEC_ID}/resize',
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
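
One caution on the first assertion in test_exec_create, which this hunk only reformats: in `assert A, B` the expression after the comma is the failure message, not a second condition, so the URL built there is never actually checked. A stricter sketch of the apparent intent, with placeholder values:

    args = (('POST', 'http+docker://localhost/containers/abc/exec'),)
    assert args[0][0] == 'POST'                       # method is checked
    assert args[0][1].endswith('/containers/abc/exec')  # URL is checked too
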
diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py
index 1e2315d..843c11b 100644
--- a/tests/unit/api_image_test.py
+++ b/tests/unit/api_image_test.py
@@ -11,7 +11,7 @@ from .api_test import (
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
class ImageTest(BaseAPIClientTest):
@@ -26,7 +26,18 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 0, 'all': 1},
+ params={'only_ids': 0, 'all': 1},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_images_name(self):
+ self.client.images('foo:bar')
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/json',
+ params={'only_ids': 0, 'all': 0,
+ 'filters': '{"reference": ["foo:bar"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -36,7 +47,7 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 1, 'all': 1},
+ params={'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -46,7 +57,7 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 1, 'all': 0},
+ params={'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -56,7 +67,7 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 0, 'all': 0,
+ params={'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -67,7 +78,7 @@ class ImageTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][1] == url_prefix + 'images/create'
assert args[1]['params'] == {
- 'tag': None, 'fromImage': 'joffrey/test001'
+ 'tag': 'latest', 'fromImage': 'joffrey/test001'
}
assert not args[1]['stream']
@@ -77,7 +88,7 @@ class ImageTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][1] == url_prefix + 'images/create'
assert args[1]['params'] == {
- 'tag': None, 'fromImage': 'joffrey/test001'
+ 'tag': 'latest', 'fromImage': 'joffrey/test001'
}
assert args[1]['stream']
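
The changed expectations reflect pull now defaulting the tag: when the caller supplies no tag or digest, the client sends tag=latest instead of tag=None. A sketch of that defaulting using docker-py's parse_repository_tag helper:

    from docker.utils import parse_repository_tag

    repo, tag = parse_repository_tag('joffrey/test001')
    if tag is None:
        tag = 'latest'  # what the updated params above now expect
    assert (repo, tag) == ('joffrey/test001', 'latest')
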
diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py
index c78554d..84d6544 100644
--- a/tests/unit/api_network_test.py
+++ b/tests/unit/api_network_test.py
@@ -1,14 +1,12 @@
import json
-import six
-
from .api_test import BaseAPIClientTest, url_prefix, response
from docker.types import IPAMConfig, IPAMPool
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
class NetworkTest(BaseAPIClientTest):
@@ -103,16 +101,16 @@ class NetworkTest(BaseAPIClientTest):
self.client.remove_network(network_id)
args = delete.call_args
- assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+ assert args[0][0] == url_prefix + f'networks/{network_id}'
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
network_data = {
- six.u('name'): network_name,
- six.u('id'): network_id,
- six.u('driver'): 'bridge',
- six.u('containers'): {},
+ 'name': network_name,
+ 'id': network_id,
+ 'driver': 'bridge',
+ 'containers': {},
}
network_response = response(status_code=200, content=network_data)
@@ -123,7 +121,7 @@ class NetworkTest(BaseAPIClientTest):
assert result == network_data
args = get.call_args
- assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+ assert args[0][0] == url_prefix + f'networks/{network_id}'
def test_connect_container_to_network(self):
network_id = 'abc12345'
@@ -136,11 +134,12 @@ class NetworkTest(BaseAPIClientTest):
container={'Id': container_id},
net_id=network_id,
aliases=['foo', 'bar'],
- links=[('baz', 'quux')]
+ links=[('baz', 'quux')],
+ driver_opt={'com.docker-py.setting': 'yes'},
)
assert post.call_args[0][0] == (
- url_prefix + 'networks/{0}/connect'.format(network_id)
+ url_prefix + f'networks/{network_id}/connect'
)
assert json.loads(post.call_args[1]['data']) == {
@@ -148,6 +147,7 @@ class NetworkTest(BaseAPIClientTest):
'EndpointConfig': {
'Aliases': ['foo', 'bar'],
'Links': ['baz:quux'],
+ 'DriverOpts': {'com.docker-py.setting': 'yes'},
},
}
@@ -162,7 +162,7 @@ class NetworkTest(BaseAPIClientTest):
container={'Id': container_id}, net_id=network_id)
assert post.call_args[0][0] == (
- url_prefix + 'networks/{0}/disconnect'.format(network_id)
+ url_prefix + f'networks/{network_id}/disconnect'
)
assert json.loads(post.call_args[1]['data']) == {
'Container': container_id
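
The new driver_opt argument simply rides along inside EndpointConfig as DriverOpts. A hedged sketch via the public type, assuming docker.types.EndpointConfig accepts driver_opt as introduced here:

    from docker.types import EndpointConfig

    cfg = EndpointConfig(version='1.40', aliases=['foo', 'bar'],
                         links=[('baz', 'quux')],
                         driver_opt={'com.docker-py.setting': 'yes'})
    assert cfg['DriverOpts'] == {'com.docker-py.setting': 'yes'}
    assert cfg['Links'] == ['baz:quux']  # tuples normalize to 'a:b'
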
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index af2bb1c..dfc3816 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -1,29 +1,31 @@
import datetime
-import json
import io
+import json
import os
import re
import shutil
import socket
+import struct
import tempfile
import threading
import time
import unittest
+import socketserver
+import http.server
import docker
-from docker.api import APIClient
+import pytest
import requests
+from docker.api import APIClient
+from docker.constants import DEFAULT_DOCKER_API_VERSION
from requests.packages import urllib3
-import six
from . import fake_api
-import pytest
-
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
@@ -33,7 +35,7 @@ def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
- if not isinstance(content, six.binary_type):
+ if not isinstance(content, bytes):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
@@ -59,7 +61,7 @@ def fake_resp(method, url, *args, **kwargs):
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
- raise Exception('{0} {1}'.format(method, url))
+ raise Exception(f'{method} {url}')
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
@@ -83,12 +85,12 @@ def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
-def fake_read_from_socket(self, response, stream, tty=False):
- return six.binary_type()
+def fake_read_from_socket(self, response, stream, tty=False, demux=False):
+ return bytes()
-url_base = '{0}/'.format(fake_api.prefix)
-url_prefix = '{0}v{1}/'.format(
+url_base = f'{fake_api.prefix}/'
+url_prefix = '{}v{}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
@@ -104,9 +106,7 @@ class BaseAPIClientTest(unittest.TestCase):
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
- self.client = APIClient()
- # Force-clear authconfig to avoid tampering with the tests
- self.client._cfg = {'Configs': {}}
+ self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
def tearDown(self):
self.client.close()
@@ -134,20 +134,20 @@ class DockerApiTest(BaseAPIClientTest):
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
- assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
+ assert url == '{}{}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
- assert url == '{0}{1}'.format(
+ assert url == '{}{}'.format(
url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
- assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
+ assert url == '{}{}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
- assert url == '{0}{1}'.format(
+ assert url == '{}{}'.format(
url_prefix, 'images/localhost:5000/image/push'
)
@@ -157,13 +157,13 @@ class DockerApiTest(BaseAPIClientTest):
def test_url_no_resource(self):
url = self.client._url('/simple')
- assert url == '{0}{1}'.format(url_prefix, 'simple')
+ assert url == '{}{}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
- assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
+ assert url == '{}{}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
@@ -185,13 +185,13 @@ class DockerApiTest(BaseAPIClientTest):
def test_retrieve_server_version(self):
client = APIClient(version="auto")
- assert isinstance(client._version, six.string_types)
+ assert isinstance(client._version, str)
assert not (client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
- assert isinstance(version, six.string_types)
+ assert isinstance(version, str)
def test_info(self):
self.client.info()
@@ -221,13 +221,11 @@ class DockerApiTest(BaseAPIClientTest):
'username': 'sakuya', 'password': 'izayoi'
}
assert args[1]['headers'] == {'Content-Type': 'application/json'}
- assert self.client._auth_configs['auths'] == {
- 'docker.io': {
- 'email': None,
- 'password': 'izayoi',
- 'username': 'sakuya',
- 'serveraddress': None,
- }
+ assert self.client._auth_configs.auths['docker.io'] == {
+ 'email': None,
+ 'password': 'izayoi',
+ 'username': 'sakuya',
+ 'serveraddress': None,
}
def test_events(self):
@@ -285,27 +283,37 @@ class DockerApiTest(BaseAPIClientTest):
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
- c = APIClient(base_url="unix://socket")
+ c = APIClient(
+ base_url="unix://socket",
+ version=DEFAULT_DOCKER_API_VERSION)
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
- c = APIClient(base_url="unix:///socket")
+ c = APIClient(
+ base_url="unix:///socket",
+ version=DEFAULT_DOCKER_API_VERSION)
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
- c = APIClient(base_url="http+unix:///socket")
+ c = APIClient(
+ base_url="http+unix:///socket",
+ version=DEFAULT_DOCKER_API_VERSION)
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
- c = APIClient(base_url="http://hostname:1234")
+ c = APIClient(
+ base_url="http://hostname:1234",
+ version=DEFAULT_DOCKER_API_VERSION)
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
- c = APIClient(base_url="tcp://hostname:1234")
+ c = APIClient(
+ base_url="tcp://hostname:1234",
+ version=DEFAULT_DOCKER_API_VERSION)
assert c.base_url == "http://hostname:1234"
@@ -330,8 +338,7 @@ class DockerApiTest(BaseAPIClientTest):
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
- if six.PY3:
- content_str = content_str.encode('utf-8')
+ content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
@@ -398,7 +405,7 @@ class UnixSocketStreamTest(unittest.TestCase):
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
- except socket.error:
+ except OSError:
# Probably no connection to accept yet
time.sleep(0.01)
continue
@@ -450,7 +457,9 @@ class UnixSocketStreamTest(unittest.TestCase):
b'\r\n'
) + b'\r\n'.join(lines)
- with APIClient(base_url="http+unix://" + self.socket_file) as client:
+ with APIClient(
+ base_url="http+unix://" + self.socket_file,
+ version=DEFAULT_DOCKER_API_VERSION) as client:
for i in range(5):
try:
stream = client.build(
@@ -467,56 +476,127 @@ class UnixSocketStreamTest(unittest.TestCase):
class TCPSocketStreamTest(unittest.TestCase):
- text_data = b'''
+ stdout_data = b'''
Now, those children out there, they're jumping through the
flames in the hope that the god of the fire will make them fruitful.
Really, you can't blame them. After all, what girl would not prefer the
child of a god to that of some acne-scarred artisan?
'''
+ stderr_data = b'''
+ And what of the true God? To whose glory churches and monasteries have been
+ built on these islands for generations past? Now shall what of Him?
+ '''
- def setUp(self):
-
- self.server = six.moves.socketserver.ThreadingTCPServer(
- ('', 0), self.get_handler_class()
- )
- self.thread = threading.Thread(target=self.server.serve_forever)
- self.thread.setDaemon(True)
- self.thread.start()
- self.address = 'http://{}:{}'.format(
- socket.gethostname(), self.server.server_address[1]
- )
-
- def tearDown(self):
- self.server.shutdown()
- self.server.server_close()
- self.thread.join()
-
- def get_handler_class(self):
- text_data = self.text_data
-
- class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
+ @classmethod
+ def setup_class(cls):
+ cls.server = socketserver.ThreadingTCPServer(
+ ('', 0), cls.get_handler_class())
+ cls.thread = threading.Thread(target=cls.server.serve_forever)
+ cls.thread.daemon = True
+ cls.thread.start()
+ cls.address = 'http://{}:{}'.format(
+ socket.gethostname(), cls.server.server_address[1])
+
+ @classmethod
+ def teardown_class(cls):
+ cls.server.shutdown()
+ cls.server.server_close()
+ cls.thread.join()
+
+ @classmethod
+ def get_handler_class(cls):
+ stdout_data = cls.stdout_data
+ stderr_data = cls.stderr_data
+
+ class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
+ resp_data = self.get_resp_data()
self.send_response(101)
self.send_header(
- 'Content-Type', 'application/vnd.docker.raw-stream'
- )
+ 'Content-Type', 'application/vnd.docker.raw-stream')
self.send_header('Connection', 'Upgrade')
self.send_header('Upgrade', 'tcp')
self.end_headers()
self.wfile.flush()
time.sleep(0.2)
- self.wfile.write(text_data)
+ self.wfile.write(resp_data)
self.wfile.flush()
- return Handler
+ def get_resp_data(self):
+ path = self.path.split('/')[-1]
+ if path == 'tty':
+ return stdout_data + stderr_data
+ elif path == 'no-tty':
+ data = b''
+ data += self.frame_header(1, stdout_data)
+ data += stdout_data
+ data += self.frame_header(2, stderr_data)
+ data += stderr_data
+ return data
+ else:
+ raise Exception(f'Unknown path {path}')
+
+ @staticmethod
+ def frame_header(stream, data):
+ return struct.pack('>BxxxL', stream, len(data))
- def test_read_from_socket(self):
- with APIClient(base_url=self.address) as client:
- resp = client._post(client._url('/dummy'), stream=True)
- data = client._read_from_socket(resp, stream=True, tty=True)
- results = b''.join(data)
+ return Handler
- assert results == self.text_data
+ def request(self, stream=None, tty=None, demux=None):
+ assert stream is not None and tty is not None and demux is not None
+ with APIClient(
+ base_url=self.address,
+ version=DEFAULT_DOCKER_API_VERSION
+ ) as client:
+ if tty:
+ url = client._url('/tty')
+ else:
+ url = client._url('/no-tty')
+ resp = client._post(url, stream=True)
+ return client._read_from_socket(
+ resp, stream=stream, tty=tty, demux=demux)
+
+ def test_read_from_socket_tty(self):
+ res = self.request(stream=True, tty=True, demux=False)
+ assert next(res) == self.stdout_data + self.stderr_data
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_tty_demux(self):
+ res = self.request(stream=True, tty=True, demux=True)
+ assert next(res) == (self.stdout_data + self.stderr_data, None)
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_no_tty(self):
+ res = self.request(stream=True, tty=False, demux=False)
+ assert next(res) == self.stdout_data
+ assert next(res) == self.stderr_data
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_no_tty_demux(self):
+ res = self.request(stream=True, tty=False, demux=True)
+ assert (self.stdout_data, None) == next(res)
+ assert (None, self.stderr_data) == next(res)
+ with self.assertRaises(StopIteration):
+ next(res)
+
+ def test_read_from_socket_no_stream_tty(self):
+ res = self.request(stream=False, tty=True, demux=False)
+ assert res == self.stdout_data + self.stderr_data
+
+ def test_read_from_socket_no_stream_tty_demux(self):
+ res = self.request(stream=False, tty=True, demux=True)
+ assert res == (self.stdout_data + self.stderr_data, None)
+
+ def test_read_from_socket_no_stream_no_tty(self):
+ res = self.request(stream=False, tty=False, demux=False)
+ assert res == self.stdout_data + self.stderr_data
+
+ def test_read_from_socket_no_stream_no_tty_demux(self):
+ res = self.request(stream=False, tty=False, demux=True)
+ assert res == (self.stdout_data, self.stderr_data)
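
The handler above frames stdout and stderr with the Engine's 8-byte multiplex header: one byte for the stream id (1 stdout, 2 stderr), three padding bytes, then a big-endian uint32 payload length, hence struct.pack('>BxxxL', ...). A matching decoder sketch for what demux=True undoes:

    import struct

    def demux_frames(data):
        # Yield (stream_id, payload) pairs from a multiplexed buffer.
        while data:
            stream_id, length = struct.unpack('>BxxxL', data[:8])
            yield stream_id, data[8:8 + length]
            data = data[8 + length:]

    frame = struct.pack('>BxxxL', 1, 5) + b'hello'
    assert list(demux_frames(frame)) == [(1, b'hello')]
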
class UserAgentTest(unittest.TestCase):
@@ -532,7 +612,7 @@ class UserAgentTest(unittest.TestCase):
self.patcher.stop()
def test_default_user_agent(self):
- client = APIClient()
+ client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
client.version()
assert self.mock_send.call_count == 1
@@ -541,7 +621,9 @@ class UserAgentTest(unittest.TestCase):
assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
- client = APIClient(user_agent='foo/bar')
+ client = APIClient(
+ user_agent='foo/bar',
+ version=DEFAULT_DOCKER_API_VERSION)
client.version()
assert self.mock_send.call_count == 1
@@ -550,7 +632,7 @@ class UserAgentTest(unittest.TestCase):
class DisableSocketTest(unittest.TestCase):
- class DummySocket(object):
+ class DummySocket:
def __init__(self, timeout=60):
self.timeout = timeout
@@ -561,7 +643,7 @@ class DisableSocketTest(unittest.TestCase):
return self.timeout
def setUp(self):
- self.client = APIClient()
+ self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
diff --git a/tests/unit/api_volume_test.py b/tests/unit/api_volume_test.py
index 7850c22..a8d9193 100644
--- a/tests/unit/api_volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -104,7 +104,7 @@ class VolumeTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][0] == 'GET'
- assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
+ assert args[0][1] == f'{url_prefix}volumes/{name}'
def test_remove_volume(self):
name = 'perfectcherryblossom'
@@ -112,4 +112,4 @@ class VolumeTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][0] == 'DELETE'
- assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
+ assert args[0][1] == f'{url_prefix}volumes/{name}'
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 947d680..8bd2e16 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import base64
import json
import os
@@ -9,13 +7,13 @@ import shutil
import tempfile
import unittest
-from docker import auth, errors
+from docker import auth, credentials, errors
import pytest
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
class RegressionTest(unittest.TestCase):
@@ -106,13 +104,13 @@ class ResolveAuthTest(unittest.TestCase):
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
- auth_config = {
+ auth_config = auth.AuthConfig({
'auths': auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
})
- }
+ })
def test_resolve_authconfig_hostname_only(self):
assert auth.resolve_authconfig(
@@ -211,70 +209,21 @@ class ResolveAuthTest(unittest.TestCase):
) is None
def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
- auth_config = {
+ auth_config = auth.AuthConfig({
'auths': auth.parse_auth({
'https://index.docker.io/v1/': self.index_config,
}),
'credsStore': 'blackbox'
- }
- with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+ })
+ with mock.patch(
+ 'docker.auth.AuthConfig._resolve_authconfig_credstore'
+ ) as m:
m.return_value = None
assert 'indexuser' == auth.resolve_authconfig(
auth_config, None
)['username']
-class CredStoreTest(unittest.TestCase):
- def test_get_credential_store(self):
- auth_config = {
- 'credHelpers': {
- 'registry1.io': 'truesecret',
- 'registry2.io': 'powerlock'
- },
- 'credsStore': 'blackbox',
- }
-
- assert auth.get_credential_store(
- auth_config, 'registry1.io'
- ) == 'truesecret'
- assert auth.get_credential_store(
- auth_config, 'registry2.io'
- ) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'registry3.io'
- ) == 'blackbox'
-
- def test_get_credential_store_no_default(self):
- auth_config = {
- 'credHelpers': {
- 'registry1.io': 'truesecret',
- 'registry2.io': 'powerlock'
- },
- }
- assert auth.get_credential_store(
- auth_config, 'registry2.io'
- ) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'registry3.io'
- ) is None
-
- def test_get_credential_store_default_index(self):
- auth_config = {
- 'credHelpers': {
- 'https://index.docker.io/v1/': 'powerlock'
- },
- 'credsStore': 'truesecret'
- }
-
- assert auth.get_credential_store(auth_config, None) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'docker.io'
- ) == 'powerlock'
- assert auth.get_credential_store(
- auth_config, 'images.io'
- ) == 'truesecret'
-
-
class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
@@ -288,13 +237,13 @@ class LoadConfigTest(unittest.TestCase):
cfg_path = os.path.join(folder, '.dockercfg')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
with open(cfg_path, 'w') as f:
- f.write('auth = {0}\n'.format(auth_))
+ f.write(f'auth = {auth_}\n')
f.write('email = sakuya@scarlet.net')
cfg = auth.load_config(cfg_path)
assert auth.resolve_authconfig(cfg) is not None
- assert cfg['auths'][auth.INDEX_NAME] is not None
- cfg = cfg['auths'][auth.INDEX_NAME]
+ assert cfg.auths[auth.INDEX_NAME] is not None
+ cfg = cfg.auths[auth.INDEX_NAME]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == 'sakuya@scarlet.net'
@@ -312,8 +261,8 @@ class LoadConfigTest(unittest.TestCase):
)
cfg = auth.load_config(cfg_path)
assert auth.resolve_authconfig(cfg) is not None
- assert cfg['auths'][auth.INDEX_URL] is not None
- cfg = cfg['auths'][auth.INDEX_URL]
+ assert cfg.auths[auth.INDEX_URL] is not None
+ cfg = cfg.auths[auth.INDEX_URL]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == email
@@ -335,8 +284,8 @@ class LoadConfigTest(unittest.TestCase):
}, f)
cfg = auth.load_config(cfg_path)
assert auth.resolve_authconfig(cfg) is not None
- assert cfg['auths'][auth.INDEX_URL] is not None
- cfg = cfg['auths'][auth.INDEX_URL]
+ assert cfg.auths[auth.INDEX_URL] is not None
+ cfg = cfg.auths[auth.INDEX_URL]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == email
@@ -346,13 +295,13 @@ class LoadConfigTest(unittest.TestCase):
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
- '.{0}.dockercfg'.format(
+ '.{}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -360,7 +309,7 @@ class LoadConfigTest(unittest.TestCase):
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
- cfg = auth.load_config(dockercfg_path)['auths']
+ cfg = auth.load_config(dockercfg_path).auths
assert registry in cfg
assert cfg[registry] is not None
cfg = cfg[registry]
@@ -378,7 +327,7 @@ class LoadConfigTest(unittest.TestCase):
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -387,7 +336,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
- cfg = auth.load_config(None)['auths']
+ cfg = auth.load_config(None).auths
assert registry in cfg
assert cfg[registry] is not None
cfg = cfg[registry]
@@ -406,7 +355,7 @@ class LoadConfigTest(unittest.TestCase):
config = {
'auths': {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -417,8 +366,8 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg['auths']
- cfg = cfg['auths'][registry]
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
assert cfg['username'] == 'sakuya'
assert cfg['password'] == 'izayoi'
assert cfg['email'] == 'sakuya@scarlet.net'
@@ -435,7 +384,7 @@ class LoadConfigTest(unittest.TestCase):
config = {
'auths': {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -446,8 +395,8 @@ class LoadConfigTest(unittest.TestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg['auths']
- cfg = cfg['auths'][registry]
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
assert cfg['email'] == 'sakuya@scarlet.net'
@@ -464,7 +413,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {'auths': {}}
+ assert dict(cfg) == {'auths': {}}
def test_load_config_invalid_auth_dict(self):
folder = tempfile.mkdtemp()
@@ -479,7 +428,7 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {'auths': {'scarlet.net': {}}}
+ assert dict(cfg) == {'auths': {'scarlet.net': {}}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
@@ -500,7 +449,352 @@ class LoadConfigTest(unittest.TestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert registry in cfg['auths']
- cfg = cfg['auths'][registry]
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
+
+
+class CredstoreTest(unittest.TestCase):
+ def setUp(self):
+ self.authconfig = auth.AuthConfig({'credsStore': 'default'})
+ self.default_store = InMemoryStore('default')
+ self.authconfig._stores['default'] = self.default_store
+ self.default_store.store(
+ 'https://gensokyo.jp/v2', 'sakuya', 'izayoi',
+ )
+ self.default_store.store(
+ 'https://default.com/v2', 'user', 'hunter2',
+ )
+
+ def test_get_credential_store(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ })
+
+ assert auth_config.get_credential_store('registry1.io') == 'truesecret'
+ assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+ assert auth_config.get_credential_store('registry3.io') == 'blackbox'
+
+ def test_get_credential_store_no_default(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ })
+ assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+ assert auth_config.get_credential_store('registry3.io') is None
+
+ def test_get_credential_store_default_index(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'https://index.docker.io/v1/': 'powerlock'
+ },
+ 'credsStore': 'truesecret'
+ })
+
+ assert auth_config.get_credential_store(None) == 'powerlock'
+ assert auth_config.get_credential_store('docker.io') == 'powerlock'
+ assert auth_config.get_credential_store('images.io') == 'truesecret'
+
+ def test_get_credential_store_with_plain_dict(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ }
+
+ assert auth.get_credential_store(
+ auth_config, 'registry1.io'
+ ) == 'truesecret'
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) == 'blackbox'
+
+ def test_get_all_credentials_credstore_only(self):
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_with_empty_credhelper(self):
+ self.authconfig['credHelpers'] = {
+ 'registry1.io': 'truesecret',
+ }
+ self.authconfig._stores['truesecret'] = InMemoryStore()
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': None,
+ }
+
+ def test_get_all_credentials_with_credhelpers_only(self):
+ del self.authconfig['credsStore']
+ assert self.authconfig.get_all_credentials() == {}
+
+ self.authconfig['credHelpers'] = {
+ 'https://gensokyo.jp/v2': 'default',
+ 'https://default.com/v2': 'default',
+ }
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_with_auths_entries(self):
+ self.authconfig.add_auth('registry1.io', {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ },
+ }
+
+ def test_get_all_credentials_with_empty_auths_entry(self):
+ self.authconfig.add_auth('default.com', {})
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_credstore_overrides_auth_entry(self):
+ self.authconfig.add_auth('default.com', {
+ 'Username': 'shouldnotsee',
+ 'Password': 'thisentry',
+ 'ServerAddress': 'https://default.com/v2',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_helpers_override_default(self):
+ self.authconfig['credHelpers'] = {
+ 'https://default.com/v2': 'truesecret',
+ }
+ truesecret = InMemoryStore('truesecret')
+ truesecret.store('https://default.com/v2', 'reimu', 'hakurei')
+ self.authconfig._stores['truesecret'] = truesecret
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_3_sources(self):
+ self.authconfig['credHelpers'] = {
+ 'registry1.io': 'truesecret',
+ }
+ truesecret = InMemoryStore('truesecret')
+ truesecret.store('registry1.io', 'reimu', 'hakurei')
+ self.authconfig._stores['truesecret'] = truesecret
+ self.authconfig.add_auth('registry2.io', {
+ 'ServerAddress': 'registry2.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ },
+ 'registry2.io': {
+ 'ServerAddress': 'registry2.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ }
+ }
+
+
+class InMemoryStore(credentials.Store):
+ def __init__(self, *args, **kwargs):
+ self.__store = {}
+
+ def get(self, server):
+ try:
+ return self.__store[server]
+ except KeyError:
+ raise credentials.errors.CredentialsNotFound()
+
+ def store(self, server, username, secret):
+ self.__store[server] = {
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret,
+ }
+
+ def list(self):
+ return {
+ k: v['Username'] for k, v in self.__store.items()
+ }
+
+ def erase(self, server):
+ del self.__store[server]
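
Taken together, these cases fix the credential precedence: a per-registry credHelpers entry wins over the default credsStore, which wins over a plain auths entry. A hedged resolution sketch against the new AuthConfig API, with no store configured so lookup falls through to the plain entry:

    from docker import auth

    cfg = auth.AuthConfig({
        'auths': {'https://index.docker.io/v1/': {
            'username': 'sakuya', 'password': 'izayoi'}},
    })
    entry = auth.resolve_authconfig(cfg, 'docker.io')
    assert entry['username'] == 'sakuya'
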
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index cce99c5..d647d3a 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -1,22 +1,24 @@
import datetime
+import os
+import unittest
+
import docker
-from docker.utils import kwargs_from_env
+import pytest
from docker.constants import (
- DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
+ DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_MAX_POOL_SIZE, IS_WINDOWS_PLATFORM
)
-import os
-import unittest
+from docker.utils import kwargs_from_env
from . import fake_api
-import pytest
try:
from unittest import mock
except ImportError:
- import mock
-
+ from unittest import mock
TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
+POOL_SIZE = 20
class ClientTest(unittest.TestCase):
@@ -25,33 +27,33 @@ class ClientTest(unittest.TestCase):
def test_events(self, mock_func):
since = datetime.datetime(2016, 1, 1, 0, 0)
mock_func.return_value = fake_api.get_fake_events()[1]
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.events(since=since) == mock_func.return_value
mock_func.assert_called_with(since=since)
@mock.patch('docker.api.APIClient.info')
def test_info(self, mock_func):
mock_func.return_value = fake_api.get_fake_info()[1]
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.info() == mock_func.return_value
mock_func.assert_called_with()
@mock.patch('docker.api.APIClient.ping')
def test_ping(self, mock_func):
mock_func.return_value = True
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.ping() is True
mock_func.assert_called_with()
@mock.patch('docker.api.APIClient.version')
def test_version(self, mock_func):
mock_func.return_value = fake_api.get_fake_version()[1]
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.version() == mock_func.return_value
mock_func.assert_called_with()
def test_call_api_client_method(self):
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
with pytest.raises(AttributeError) as cm:
client.create_container()
s = cm.exconly()
@@ -65,7 +67,9 @@ class ClientTest(unittest.TestCase):
assert "this method is now on the object APIClient" not in s
def test_call_containers(self):
- client = docker.DockerClient(**kwargs_from_env())
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION,
+ **kwargs_from_env())
with pytest.raises(TypeError) as cm:
client.containers()
@@ -74,6 +78,84 @@ class ClientTest(unittest.TestCase):
assert "'ContainerCollection' object is not callable" in s
assert "docker.APIClient" in s
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_default_pool_size_unix(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_default_pool_size_win(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_pool_size_unix(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_pool_size_win(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=POOL_SIZE
+ )
+
class FromEnvTest(unittest.TestCase):
@@ -90,7 +172,7 @@ class FromEnvTest(unittest.TestCase):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.api.base_url == "https://192.168.59.103:2376"
def test_from_env_with_version(self):
@@ -102,11 +184,85 @@ class FromEnvTest(unittest.TestCase):
assert client.api._version == '2.32'
def test_from_env_without_version_uses_default(self):
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.api._version == DEFAULT_DOCKER_API_VERSION
def test_from_env_without_timeout_uses_default(self):
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_default_pool_size_from_env_unix(self, mock_obj):
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_default_pool_size_from_env_win(self, mock_obj):
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_pool_size_from_env_unix(self, mock_obj):
+ client = docker.from_env(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_pool_size_from_env_win(self, mock_obj):
+ client = docker.from_env(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=POOL_SIZE
+ )
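
A hedged usage sketch for the knob these tests exercise: max_pool_size caps the urllib3 connection pool created by the transport adapters, with DEFAULT_MAX_POOL_SIZE applied when it is omitted; both entry points take the keyword:

    import docker
    from docker.constants import DEFAULT_DOCKER_API_VERSION

    client = docker.DockerClient(base_url='unix:///var/run/docker.sock',
                                 version=DEFAULT_DOCKER_API_VERSION,
                                 max_pool_size=20)
    client.close()
    # docker.from_env(max_pool_size=20) accepts the same keyword.
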
diff --git a/tests/unit/context_test.py b/tests/unit/context_test.py
new file mode 100644
index 0000000..6d6d672
--- /dev/null
+++ b/tests/unit/context_test.py
@@ -0,0 +1,49 @@
+import unittest
+import docker
+import pytest
+from docker.constants import DEFAULT_UNIX_SOCKET
+from docker.constants import DEFAULT_NPIPE
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.context import ContextAPI, Context
+
+
+class BaseContextTest(unittest.TestCase):
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Linux specific path check'
+ )
+ def test_url_compatibility_on_linux(self):
+ c = Context("test")
+ assert c.Host == DEFAULT_UNIX_SOCKET.strip("http+")
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Windows specific path check'
+ )
+ def test_url_compatibility_on_windows(self):
+ c = Context("test")
+ assert c.Host == DEFAULT_NPIPE
+
+ def test_fail_on_default_context_create(self):
+ with pytest.raises(docker.errors.ContextException):
+ ContextAPI.create_context("default")
+
+ def test_default_in_context_list(self):
+ found = False
+ ctx = ContextAPI.contexts()
+ for c in ctx:
+ if c.Name == "default":
+ found = True
+ assert found is True
+
+ def test_get_current_context(self):
+ assert ContextAPI.get_current_context().Name == "default"
+
+ def test_https_host(self):
+ c = Context("test", host="tcp://testdomain:8080", tls=True)
+ assert c.Host == "https://testdomain:8080"
+
+ def test_context_inspect_without_params(self):
+ ctx = ContextAPI.inspect_context()
+ assert ctx["Name"] == "default"
+ assert ctx["Metadata"]["StackOrchestrator"] == "swarm"
+ assert ctx["Endpoints"]["docker"]["Host"] in [
+ DEFAULT_NPIPE, DEFAULT_UNIX_SOCKET.strip("http+")]
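
A hedged sketch of the context API this new file covers. One subtlety: DEFAULT_UNIX_SOCKET.strip("http+") strips a character set rather than a prefix, and only yields the bare unix:// URL because the constant happens to start with exactly those characters and end with none of them.

    from docker.context import ContextAPI

    ctx = ContextAPI.create_context("remote", host="tcp://testdomain:8080")
    print(ctx.Name, ctx.Host)
    ContextAPI.remove_context("remote")  # contexts persist on disk
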
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
index 2be0578..a0a171b 100644
--- a/tests/unit/dockertypes_test.py
+++ b/tests/unit/dockertypes_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import unittest
import pytest
@@ -14,8 +12,8 @@ from docker.types.services import convert_service_ports
try:
from unittest import mock
-except:
- import mock
+except: # noqa: E722
+ from unittest import mock
def create_host_config(*args, **kwargs):
@@ -85,6 +83,12 @@ class HostConfigTest(unittest.TestCase):
with pytest.raises(ValueError):
create_host_config(version='1.23', userns_mode='host12')
+ def test_create_host_config_with_uts(self):
+ config = create_host_config(version='1.15', uts_mode='host')
+ assert config.get('UTSMode') == 'host'
+ with pytest.raises(ValueError):
+ create_host_config(version='1.15', uts_mode='host12')
+
def test_create_host_config_with_oom_score_adj(self):
config = create_host_config(version='1.22', oom_score_adj=100)
assert config.get('OomScoreAdj') == 100
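
Hedged usage sketch for the new uts_mode support: 'host' is the only accepted value and anything else raises ValueError, matching the assertions above:

    from docker.types import HostConfig

    hc = HostConfig(version='1.15', uts_mode='host')
    assert hc.get('UTSMode') == 'host'
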
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
index e27a9b1..f8c3a66 100644
--- a/tests/unit/errors_test.py
+++ b/tests/unit/errors_test.py
@@ -79,18 +79,39 @@ class APIErrorTest(unittest.TestCase):
err = APIError('', response=resp)
assert err.is_client_error() is True
+ def test_is_error_300(self):
+ """Report no error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_error() is False
+
+ def test_is_error_400(self):
+ """Report error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_error() is True
+
+ def test_is_error_500(self):
+ """Report error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_error() is True
+
def test_create_error_from_exception(self):
- resp = requests.Response()
- resp.status_code = 500
- err = APIError('')
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('')
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
try:
- resp.raise_for_status()
- except requests.exceptions.HTTPError as e:
- try:
- create_api_error_from_http_exception(e)
- except APIError as e:
- err = e
- assert err.is_server_error() is True
+ create_api_error_from_http_exception(e)
+ except APIError as e:
+ err = e
+ assert err.is_server_error() is True
class ContainerErrorTest(unittest.TestCase):
@@ -105,7 +126,7 @@ class ContainerErrorTest(unittest.TestCase):
err = ContainerError(container, exit_status, command, image, stderr)
msg = ("Command '{}' in image '{}' returned non-zero exit status {}"
- ).format(command, image, exit_status, stderr)
+ ).format(command, image, exit_status)
assert str(err) == msg
def test_container_with_stderr(self):
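
The three new checks pin is_error() down as the union of the existing client and server predicates over the usual 4xx/5xx split; equivalently:

    def is_error(status_code):
        # Client errors (4xx) or server errors (5xx); 3xx is not an error.
        return 400 <= status_code < 500 or 500 <= status_code < 600

    assert not is_error(300) and is_error(400) and is_error(500)
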
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index e609b64..4c93329 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -1,7 +1,8 @@
-from . import fake_stat
from docker import constants
-CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
+from . import fake_stat
+
+CURRENT_VERSION = f'v{constants.DEFAULT_DOCKER_API_VERSION}'
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
@@ -16,6 +17,8 @@ FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
FAKE_NODE_ID = '24ifsmvkjbyhk'
+FAKE_SECRET_ID = 'epdyrw4tsi03xy3deu8g8ly6o'
+FAKE_SECRET_NAME = 'super_secret'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
@@ -511,102 +514,108 @@ def post_fake_network_disconnect():
return 200, None
+def post_fake_secret():
+ status_code = 200
+ response = {'ID': FAKE_SECRET_ID}
+ return status_code, response
+
+
# Maps real api url to fake response callback
prefix = 'http+docker://localhost'
if constants.IS_WINDOWS_PLATFORM:
prefix = 'http+docker://localnpipe'
fake_responses = {
- '{0}/version'.format(prefix):
+ f'{prefix}/version':
get_fake_version,
- '{1}/{0}/version'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/version':
get_fake_version,
- '{1}/{0}/info'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/info':
get_fake_info,
- '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/auth':
post_fake_auth,
- '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/_ping':
get_fake_ping,
- '{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/search':
get_fake_search,
- '{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/json':
get_fake_images,
- '{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/history':
get_fake_image_history,
- '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/create':
post_fake_import_image,
- '{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/json':
get_fake_containers,
- '{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start':
post_fake_start_container,
- '{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize':
post_fake_resize_container,
- '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json':
get_fake_inspect_container,
- '{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename':
post_fake_rename_container,
- '{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag':
post_fake_tag_image,
- '{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait':
get_fake_wait,
- '{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs':
get_fake_logs,
- '{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes':
get_fake_diff,
- '{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export':
get_fake_export,
- '{1}/{0}/containers/3cc2351ab11b/update'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update':
post_fake_update_container,
- '{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec':
post_fake_exec_create,
- '{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start':
post_fake_exec_start,
- '{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json':
get_fake_exec_inspect,
- '{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize':
post_fake_exec_resize,
- '{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats':
get_fake_stats,
- '{1}/{0}/containers/3cc2351ab11b/top'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top':
get_fake_top,
- '{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop':
post_fake_stop_container,
- '{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill':
post_fake_kill_container,
- '{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause':
post_fake_pause_container,
- '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause':
post_fake_unpause_container,
- '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart':
post_fake_restart_container,
- '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b':
delete_fake_remove_container,
- '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/create':
post_fake_image_create,
- '{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128':
delete_fake_remove_image,
- '{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get':
get_fake_get_image,
- '{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/load':
post_fake_load_image,
- '{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/json':
get_fake_inspect_image,
- '{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/insert':
get_fake_insert_image,
- '{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/push':
post_fake_push,
- '{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/commit':
post_fake_commit,
- '{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/create':
post_fake_create_container,
- '{1}/{0}/build'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/build':
post_fake_build_container,
- '{1}/{0}/events'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/events':
get_fake_events,
- ('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'GET'):
+ (f'{prefix}/{CURRENT_VERSION}/volumes', 'GET'):
get_fake_volume_list,
- ('{1}/{0}/volumes/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ (f'{prefix}/{CURRENT_VERSION}/volumes/create', 'POST'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
@@ -620,11 +629,11 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_NODE_ID
), 'POST'):
post_fake_update_node,
- ('{1}/{0}/swarm/join'.format(CURRENT_VERSION, prefix), 'POST'):
+ (f'{prefix}/{CURRENT_VERSION}/swarm/join', 'POST'):
post_fake_join_swarm,
- ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
+ (f'{prefix}/{CURRENT_VERSION}/networks', 'GET'):
get_fake_network_list,
- ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ (f'{prefix}/{CURRENT_VERSION}/networks/create', 'POST'):
post_fake_network,
('{1}/{0}/networks/{2}'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
@@ -642,4 +651,6 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'POST'):
post_fake_network_disconnect,
+ f'{prefix}/{CURRENT_VERSION}/secrets/create':
+ post_fake_secret,
}
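For orientation: keys in the fake_responses table are either a bare URL (matched regardless of method) or a (url, method) tuple, and values are zero-argument callbacks returning a (status_code, content) pair. A minimal dispatch sketch, assuming this lookup order (not the suite's actual code):

    def dispatch_fake(url, method='GET'):
        # Prefer the method-qualified key, then fall back to the bare URL.
        callback = fake_responses.get((url, method)) or fake_responses.get(url)
        if callback is None:
            raise KeyError(f'no fake response registered for {method} {url}')
        return callback()  # -> (status_code, content)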
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
index 2147bfd..1663ef1 100644
--- a/tests/unit/fake_api_client.py
+++ b/tests/unit/fake_api_client.py
@@ -1,12 +1,13 @@
import copy
import docker
+from docker.constants import DEFAULT_DOCKER_API_VERSION
from . import fake_api
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
class CopyReturnMagicMock(mock.MagicMock):
@@ -14,7 +15,7 @@ class CopyReturnMagicMock(mock.MagicMock):
A MagicMock which deep copies every return value.
"""
def _mock_call(self, *args, **kwargs):
- ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
+ ret = super()._mock_call(*args, **kwargs)
if isinstance(ret, (dict, list)):
ret = copy.deepcopy(ret)
return ret
@@ -30,7 +31,7 @@ def make_fake_api_client(overrides=None):
if overrides is None:
overrides = {}
- api_client = docker.APIClient()
+ api_client = docker.APIClient(version=DEFAULT_DOCKER_API_VERSION)
mock_attrs = {
'build.return_value': fake_api.FAKE_IMAGE_ID,
'commit.return_value': fake_api.post_fake_commit()[1],
@@ -39,6 +40,7 @@ def make_fake_api_client(overrides=None):
fake_api.post_fake_create_container()[1],
'create_host_config.side_effect': api_client.create_host_config,
'create_network.return_value': fake_api.post_fake_network()[1],
+ 'create_secret.return_value': fake_api.post_fake_secret()[1],
'exec_create.return_value': fake_api.post_fake_exec_create()[1],
'exec_start.return_value': fake_api.post_fake_exec_start()[1],
'images.return_value': fake_api.get_fake_images()[1],
@@ -50,6 +52,7 @@ def make_fake_api_client(overrides=None):
'networks.return_value': fake_api.get_fake_network_list()[1],
'start.return_value': None,
'wait.return_value': {'StatusCode': 0},
+ 'version.return_value': fake_api.get_fake_version()
}
mock_attrs.update(overrides)
mock_client = CopyReturnMagicMock(**mock_attrs)
@@ -62,6 +65,6 @@ def make_fake_client(overrides=None):
"""
Returns a Client with a fake APIClient.
"""
- client = docker.DockerClient()
+ client = docker.DockerClient(version=DEFAULT_DOCKER_API_VERSION)
client.api = make_fake_api_client(overrides)
return client
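Hypothetical usage of the helper above: the returned DockerClient talks to a CopyReturnMagicMock instead of a daemon, so model-layer tests can exercise high-level calls and still assert on the underlying API calls:

    client = make_fake_client()
    container = client.containers.create('busybox')   # served from fake_api
    client.api.create_container.assert_called()       # ordinary mock assertion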
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index 48a5288..c7aa46b 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -95,6 +95,7 @@ class ContainerCollectionTest(unittest.TestCase):
ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
user='bob',
userns_mode='host',
+ uts_mode='host',
version='1.23',
volume_driver='some_driver',
volumes=[
@@ -174,6 +175,8 @@ class ContainerCollectionTest(unittest.TestCase):
'Tmpfs': {'/blah': ''},
'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
'UsernsMode': 'host',
+ 'UTSMode': 'host',
+ 'VolumeDriver': 'some_driver',
'VolumesFrom': ['container'],
},
healthcheck={'test': 'true'},
@@ -188,7 +191,6 @@ class ContainerCollectionTest(unittest.TestCase):
stop_signal=9,
tty=True,
user='bob',
- volume_driver='some_driver',
volumes=[
'/mnt/vol2',
'/mnt/vol1',
@@ -230,7 +232,9 @@ class ContainerCollectionTest(unittest.TestCase):
container = client.containers.run('alpine', 'sleep 300', detach=True)
assert container.id == FAKE_CONTAINER_ID
- client.api.pull.assert_called_with('alpine', platform=None, tag=None)
+ client.api.pull.assert_called_with(
+ 'alpine', platform=None, tag='latest', all_tags=False, stream=True
+ )
def test_run_with_error(self):
client = make_fake_client()
@@ -412,10 +416,11 @@ class ContainerTest(unittest.TestCase):
client.api.exec_create.assert_called_with(
FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
stdin=False, tty=False, privileged=True, user='', environment=None,
- workdir=None
+ workdir=None,
)
client.api.exec_start.assert_called_with(
- FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+ FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False,
+ demux=False,
)
def test_exec_run_failure(self):
@@ -425,10 +430,11 @@ class ContainerTest(unittest.TestCase):
client.api.exec_create.assert_called_with(
FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True,
stdin=False, tty=False, privileged=True, user='', environment=None,
- workdir=None
+ workdir=None,
)
client.api.exec_start.assert_called_with(
- FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False
+ FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False,
+ demux=False,
)
def test_export(self):
@@ -444,7 +450,7 @@ class ContainerTest(unittest.TestCase):
container = client.containers.get(FAKE_CONTAINER_ID)
container.get_archive('foo')
client.api.get_archive.assert_called_with(
- FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
+ FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE, False
)
def test_image(self):
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
index 6783279..f3ca0be 100644
--- a/tests/unit/models_images_test.py
+++ b/tests/unit/models_images_test.py
@@ -1,6 +1,8 @@
+import unittest
+import warnings
+
from docker.constants import DEFAULT_DATA_CHUNK_SIZE
from docker.models.images import Image
-import unittest
from .fake_api import FAKE_IMAGE_ID
from .fake_api_client import make_fake_client
@@ -42,16 +44,36 @@ class ImageCollectionTest(unittest.TestCase):
def test_pull(self):
client = make_fake_client()
- image = client.images.pull('test_image:latest')
- client.api.pull.assert_called_with('test_image', tag='latest')
+ image = client.images.pull('test_image:test')
+ client.api.pull.assert_called_with(
+ 'test_image', tag='test', all_tags=False, stream=True
+ )
+ client.api.inspect_image.assert_called_with('test_image:test')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_pull_tag_precedence(self):
+ client = make_fake_client()
+ image = client.images.pull('test_image:latest', tag='test')
+ client.api.pull.assert_called_with(
+ 'test_image', tag='test', all_tags=False, stream=True
+ )
+ client.api.inspect_image.assert_called_with('test_image:test')
+
+ image = client.images.pull('test_image')
+ client.api.pull.assert_called_with(
+ 'test_image', tag='latest', all_tags=False, stream=True
+ )
client.api.inspect_image.assert_called_with('test_image:latest')
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_pull_multiple(self):
client = make_fake_client()
- images = client.images.pull('test_image')
- client.api.pull.assert_called_with('test_image', tag=None)
+ images = client.images.pull('test_image', all_tags=True)
+ client.api.pull.assert_called_with(
+ 'test_image', tag='latest', all_tags=True, stream=True
+ )
client.api.images.assert_called_with(
all=False, name='test_image', filters=None
)
@@ -61,6 +83,16 @@ class ImageCollectionTest(unittest.TestCase):
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
+ def test_pull_with_stream_param(self):
+ client = make_fake_client()
+ with warnings.catch_warnings(record=True) as w:
+ client.images.pull('test_image', stream=True)
+
+ assert len(w) == 1
+ assert str(w[0].message).startswith(
+ '`stream` is not a valid parameter'
+ )
+
def test_push(self):
client = make_fake_client()
client.images.push('foobar', insecure_registry=True)
@@ -80,6 +112,11 @@ class ImageCollectionTest(unittest.TestCase):
client.images.search('test')
client.api.search.assert_called_with('test')
+ def test_search_limit(self):
+ client = make_fake_client()
+ client.images.search('test', limit=5)
+ client.api.search.assert_called_with('test', limit=5)
+
class ImageTest(unittest.TestCase):
def test_short_id(self):
diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py
index 5af24ee..11dea29 100644
--- a/tests/unit/models_resources_test.py
+++ b/tests/unit/models_resources_test.py
@@ -16,7 +16,7 @@ class ModelTest(unittest.TestCase):
def test_hash(self):
client = make_fake_client()
container1 = client.containers.get(FAKE_CONTAINER_ID)
- my_set = set([container1])
+ my_set = {container1}
assert len(my_set) == 1
container2 = client.containers.get(FAKE_CONTAINER_ID)
diff --git a/tests/unit/models_secrets_test.py b/tests/unit/models_secrets_test.py
new file mode 100644
index 0000000..1c261a8
--- /dev/null
+++ b/tests/unit/models_secrets_test.py
@@ -0,0 +1,11 @@
+import unittest
+
+from .fake_api_client import make_fake_client
+from .fake_api import FAKE_SECRET_NAME
+
+
+class SecretCreateTest(unittest.TestCase):
+ def test_secrets_repr(self):
+ client = make_fake_client()
+ secret = client.secrets.create(name="super_secret", data="secret")
+ assert repr(secret) == f"<Secret: '{FAKE_SECRET_NAME}'>"
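This passes even though the create endpoint's fake response carries no name: post_fake_secret above returns only {'ID': FAKE_SECRET_ID}. A hedged sketch of the back-fill the model layer presumably performs so the name is available to repr() (illustrative only; create_secret_attrs is not a real helper):

    def create_secret_attrs(api_response, name):
        attrs = dict(api_response)              # e.g. {'ID': 'epdyrw4t...'}
        attrs.setdefault('Spec', {})['Name'] = name
        return attrs                            # name now visible to repr()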
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
index 247bb4a..b9192e4 100644
--- a/tests/unit/models_services_test.py
+++ b/tests/unit/models_services_test.py
@@ -26,6 +26,9 @@ class CreateServiceKwargsTest(unittest.TestCase):
'mounts': [{'some': 'mounts'}],
'stop_grace_period': 5,
'constraints': ['foo=bar'],
+ 'preferences': ['bar=baz'],
+ 'platforms': [('x86_64', 'linux')],
+ 'maxreplicas': 1
})
task_template = kwargs.pop('task_template')
@@ -37,17 +40,22 @@ class CreateServiceKwargsTest(unittest.TestCase):
'update_config': {'update': 'config'},
'endpoint_spec': {'blah': 'blah'},
}
- assert set(task_template.keys()) == set([
+ assert set(task_template.keys()) == {
'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
'LogDriver', 'Networks'
- ])
- assert task_template['Placement'] == {'Constraints': ['foo=bar']}
+ }
+ assert task_template['Placement'] == {
+ 'Constraints': ['foo=bar'],
+ 'Preferences': ['bar=baz'],
+ 'Platforms': [{'Architecture': 'x86_64', 'OS': 'linux'}],
+ 'MaxReplicas': 1,
+ }
assert task_template['LogDriver'] == {
'Name': 'logdriver',
'Options': {'foo': 'bar'}
}
assert task_template['Networks'] == [{'Target': 'somenet'}]
- assert set(task_template['ContainerSpec'].keys()) == set([
+ assert set(task_template['ContainerSpec'].keys()) == {
'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
'Labels', 'Mounts', 'StopGracePeriod'
- ])
+ }
diff --git a/tests/unit/sshadapter_test.py b/tests/unit/sshadapter_test.py
new file mode 100644
index 0000000..874239a
--- /dev/null
+++ b/tests/unit/sshadapter_test.py
@@ -0,0 +1,39 @@
+import unittest
+import docker
+from docker.transport.sshconn import SSHSocket
+
+
+class SSHAdapterTest(unittest.TestCase):
+ @staticmethod
+ def test_ssh_hostname_prefix_trim():
+ conn = docker.transport.SSHHTTPAdapter(
+ base_url="ssh://user@hostname:1234", shell_out=True)
+ assert conn.ssh_host == "user@hostname:1234"
+
+ @staticmethod
+ def test_ssh_parse_url():
+ c = SSHSocket(host="user@hostname:1234")
+ assert c.host == "hostname"
+ assert c.port == "1234"
+ assert c.user == "user"
+
+ @staticmethod
+ def test_ssh_parse_hostname_only():
+ c = SSHSocket(host="hostname")
+ assert c.host == "hostname"
+ assert c.port is None
+ assert c.user is None
+
+ @staticmethod
+ def test_ssh_parse_user_and_hostname():
+ c = SSHSocket(host="user@hostname")
+ assert c.host == "hostname"
+ assert c.port is None
+ assert c.user == "user"
+
+ @staticmethod
+ def test_ssh_parse_hostname_and_port():
+ c = SSHSocket(host="hostname:22")
+ assert c.host == "hostname"
+ assert c.port == "22"
+ assert c.user is None
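One way to get the user/host/port behaviour these tests pin down (an illustrative sketch, not SSHSocket's actual implementation):

    def parse_ssh_host(host):
        user = port = None
        if '@' in host:
            user, host = host.split('@', 1)
        if ':' in host:
            host, port = host.rsplit(':', 1)
        return user, host, port

    assert parse_ssh_host('user@hostname:1234') == ('user', 'hostname', '1234')
    assert parse_ssh_host('hostname') == (None, 'hostname', None)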
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
index 73b7336..41a87f2 100644
--- a/tests/unit/ssladapter_test.py
+++ b/tests/unit/ssladapter_test.py
@@ -32,30 +32,30 @@ class SSLAdapterTest(unittest.TestCase):
class MatchHostnameTest(unittest.TestCase):
cert = {
'issuer': (
- (('countryName', u'US'),),
- (('stateOrProvinceName', u'California'),),
- (('localityName', u'San Francisco'),),
- (('organizationName', u'Docker Inc'),),
- (('organizationalUnitName', u'Docker-Python'),),
- (('commonName', u'localhost'),),
- (('emailAddress', u'info@docker.com'),)
+ (('countryName', 'US'),),
+ (('stateOrProvinceName', 'California'),),
+ (('localityName', 'San Francisco'),),
+ (('organizationName', 'Docker Inc'),),
+ (('organizationalUnitName', 'Docker-Python'),),
+ (('commonName', 'localhost'),),
+ (('emailAddress', 'info@docker.com'),)
),
'notAfter': 'Mar 25 23:08:23 2030 GMT',
- 'notBefore': u'Mar 25 23:08:23 2016 GMT',
- 'serialNumber': u'BD5F894C839C548F',
+ 'notBefore': 'Mar 25 23:08:23 2016 GMT',
+ 'serialNumber': 'BD5F894C839C548F',
'subject': (
- (('countryName', u'US'),),
- (('stateOrProvinceName', u'California'),),
- (('localityName', u'San Francisco'),),
- (('organizationName', u'Docker Inc'),),
- (('organizationalUnitName', u'Docker-Python'),),
- (('commonName', u'localhost'),),
- (('emailAddress', u'info@docker.com'),)
+ (('countryName', 'US'),),
+ (('stateOrProvinceName', 'California'),),
+ (('localityName', 'San Francisco'),),
+ (('organizationName', 'Docker Inc'),),
+ (('organizationalUnitName', 'Docker-Python'),),
+ (('commonName', 'localhost'),),
+ (('emailAddress', 'info@docker.com'),)
),
'subjectAltName': (
- ('DNS', u'localhost'),
- ('DNS', u'*.gensokyo.jp'),
- ('IP Address', u'127.0.0.1'),
+ ('DNS', 'localhost'),
+ ('DNS', '*.gensokyo.jp'),
+ ('IP Address', '127.0.0.1'),
),
'version': 3
}
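For context, the '*.gensokyo.jp' subjectAltName entry is the wildcard case this MatchHostnameTest fixture exists for. A rough sketch of DNS-name matching against it (using fnmatch purely for illustration, not the matcher under test):

    import fnmatch
    alt_names = [value for kind, value in cert['subjectAltName'] if kind == 'DNS']
    assert any(fnmatch.fnmatch('touhou.gensokyo.jp', pat) for pat in alt_names)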
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
index 4385380..aee1b9e 100644
--- a/tests/unit/swarm_test.py
+++ b/tests/unit/swarm_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import json
from . import fake_api
diff --git a/tests/unit/types_containers_test.py b/tests/unit/types_containers_test.py
new file mode 100644
index 0000000..b0ad0a7
--- /dev/null
+++ b/tests/unit/types_containers_test.py
@@ -0,0 +1,6 @@
+from docker.types.containers import ContainerConfig
+
+
+def test_uid_0_is_not_elided():
+ x = ContainerConfig(image='i', version='v', command='true', user=0)
+ assert x['User'] == '0'
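The regression guarded here is the classic falsy-zero check: uid 0 (root) must not be dropped on its way into the 'User' field. A minimal sketch of the bug and the fix (illustrative helpers, not the library's code):

    def normalize_user_buggy(user):
        if user:                 # bool(0) is False, so root is elided
            return str(user)

    def normalize_user_fixed(user):
        if user is not None:     # only elide a genuinely absent user
            return str(user)

    assert normalize_user_buggy(0) is None
    assert normalize_user_fixed(0) == '0'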
diff --git a/tests/unit/utils_build_test.py b/tests/unit/utils_build_test.py
index 012f15b..9f18388 100644
--- a/tests/unit/utils_build_test.py
+++ b/tests/unit/utils_build_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import os
import os.path
import shutil
@@ -82,7 +80,7 @@ class ExcludePathsTest(unittest.TestCase):
assert sorted(paths) == sorted(set(paths))
def test_wildcard_exclude(self):
- assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
+ assert self.exclude(['*']) == {'Dockerfile', '.dockerignore'}
def test_exclude_dockerfile_dockerignore(self):
"""
@@ -99,18 +97,18 @@ class ExcludePathsTest(unittest.TestCase):
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
- assert self.exclude(['*'], dockerfile='Dockerfile.alt') == set(
- ['Dockerfile.alt', '.dockerignore']
- )
+ assert self.exclude(['*'], dockerfile='Dockerfile.alt') == {
+ 'Dockerfile.alt', '.dockerignore'
+ }
assert self.exclude(
['*'], dockerfile='foo/Dockerfile3'
- ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+ ) == convert_paths({'foo/Dockerfile3', '.dockerignore'})
# https://github.com/docker/docker-py/issues/1956
assert self.exclude(
['*'], dockerfile='./foo/Dockerfile3'
- ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+ ) == convert_paths({'foo/Dockerfile3', '.dockerignore'})
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
@@ -119,56 +117,56 @@ class ExcludePathsTest(unittest.TestCase):
def test_single_filename(self):
assert self.exclude(['a.py']) == convert_paths(
- self.all_paths - set(['a.py'])
+ self.all_paths - {'a.py'}
)
def test_single_filename_leading_dot_slash(self):
assert self.exclude(['./a.py']) == convert_paths(
- self.all_paths - set(['a.py'])
+ self.all_paths - {'a.py'}
)
# As odd as it sounds, a filename pattern with a trailing slash on the
# end *will* result in that file being excluded.
def test_single_filename_trailing_slash(self):
assert self.exclude(['a.py/']) == convert_paths(
- self.all_paths - set(['a.py'])
+ self.all_paths - {'a.py'}
)
def test_wildcard_filename_start(self):
assert self.exclude(['*.py']) == convert_paths(
- self.all_paths - set(['a.py', 'b.py', 'cde.py'])
+ self.all_paths - {'a.py', 'b.py', 'cde.py'}
)
def test_wildcard_with_exception(self):
assert self.exclude(['*.py', '!b.py']) == convert_paths(
- self.all_paths - set(['a.py', 'cde.py'])
+ self.all_paths - {'a.py', 'cde.py'}
)
def test_wildcard_with_wildcard_exception(self):
assert self.exclude(['*.*', '!*.go']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
- ])
+ }
)
def test_wildcard_filename_end(self):
assert self.exclude(['a.*']) == convert_paths(
- self.all_paths - set(['a.py', 'a.go'])
+ self.all_paths - {'a.py', 'a.go'}
)
def test_question_mark(self):
assert self.exclude(['?.py']) == convert_paths(
- self.all_paths - set(['a.py', 'b.py'])
+ self.all_paths - {'a.py', 'b.py'}
)
def test_single_subdir_single_filename(self):
assert self.exclude(['foo/a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py'])
+ self.all_paths - {'foo/a.py'}
)
def test_single_subdir_single_filename_leading_slash(self):
assert self.exclude(['/foo/a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py'])
+ self.all_paths - {'foo/a.py'}
)
def test_exclude_include_absolute_path(self):
@@ -176,57 +174,57 @@ class ExcludePathsTest(unittest.TestCase):
assert exclude_paths(
base,
['/*', '!/*.py']
- ) == set(['a.py', 'b.py'])
+ ) == {'a.py', 'b.py'}
def test_single_subdir_with_path_traversal(self):
assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py'])
+ self.all_paths - {'foo/a.py'}
)
def test_single_subdir_wildcard_filename(self):
assert self.exclude(['foo/*.py']) == convert_paths(
- self.all_paths - set(['foo/a.py', 'foo/b.py'])
+ self.all_paths - {'foo/a.py', 'foo/b.py'}
)
def test_wildcard_subdir_single_filename(self):
assert self.exclude(['*/a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py', 'bar/a.py'])
+ self.all_paths - {'foo/a.py', 'bar/a.py'}
)
def test_wildcard_subdir_wildcard_filename(self):
assert self.exclude(['*/*.py']) == convert_paths(
- self.all_paths - set(['foo/a.py', 'foo/b.py', 'bar/a.py'])
+ self.all_paths - {'foo/a.py', 'foo/b.py', 'bar/a.py'}
)
def test_directory(self):
assert self.exclude(['foo']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo', 'foo/a.py', 'foo/b.py', 'foo/bar', 'foo/bar/a.py',
'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_trailing_slash(self):
assert self.exclude(['foo']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_single_exception(self):
assert self.exclude(['foo', '!foo/bar/a.py']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/a.py', 'foo/b.py', 'foo', 'foo/bar',
'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_subdir_exception(self):
assert self.exclude(['foo', '!foo/bar']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
- ])
+ }
)
@pytest.mark.skipif(
@@ -234,21 +232,21 @@ class ExcludePathsTest(unittest.TestCase):
)
def test_directory_with_subdir_exception_win32_pathsep(self):
assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/bar', 'foo/bar/a.py', 'foo', 'foo/Dockerfile3'
- ])
+ }
)
def test_subdirectory(self):
assert self.exclude(['foo/bar']) == convert_paths(
- self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ self.all_paths - {'foo/bar', 'foo/bar/a.py'}
)
@pytest.mark.skipif(
@@ -256,33 +254,33 @@ class ExcludePathsTest(unittest.TestCase):
)
def test_subdirectory_win32_pathsep(self):
assert self.exclude(['foo\\bar']) == convert_paths(
- self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ self.all_paths - {'foo/bar', 'foo/bar/a.py'}
)
def test_double_wildcard(self):
assert self.exclude(['**/a.py']) == convert_paths(
- self.all_paths - set(
- ['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
- )
+ self.all_paths - {
+ 'a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py'
+ }
)
assert self.exclude(['foo/**/bar']) == convert_paths(
- self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ self.all_paths - {'foo/bar', 'foo/bar/a.py'}
)
def test_single_and_double_wildcard(self):
assert self.exclude(['**/target/*/*']) == convert_paths(
- self.all_paths - set(
- ['target/subdir/file.txt',
+ self.all_paths - {
+ 'target/subdir/file.txt',
'subdir/target/subdir/file.txt',
- 'subdir/subdir2/target/subdir/file.txt']
- )
+ 'subdir/subdir2/target/subdir/file.txt'
+ }
)
def test_trailing_double_wildcard(self):
assert self.exclude(['subdir/**']) == convert_paths(
- self.all_paths - set(
- ['subdir/file.txt',
+ self.all_paths - {
+ 'subdir/file.txt',
'subdir/target/file.txt',
'subdir/target/subdir/file.txt',
'subdir/subdir2/file.txt',
@@ -292,16 +290,16 @@ class ExcludePathsTest(unittest.TestCase):
'subdir/target/subdir',
'subdir/subdir2',
'subdir/subdir2/target',
- 'subdir/subdir2/target/subdir']
- )
+ 'subdir/subdir2/target/subdir'
+ }
)
def test_double_wildcard_with_exception(self):
assert self.exclude(['**', '!bar', '!foo/bar']) == convert_paths(
- set([
+ {
'foo/bar', 'foo/bar/a.py', 'bar', 'bar/a.py', 'Dockerfile',
'.dockerignore',
- ])
+ }
)
def test_include_wildcard(self):
@@ -324,7 +322,7 @@ class ExcludePathsTest(unittest.TestCase):
assert exclude_paths(
base,
['*.md', '!README*.md', 'README-secret.md']
- ) == set(['README.md', 'README-bis.md'])
+ ) == {'README.md', 'README-bis.md'}
def test_parent_directory(self):
base = make_tree(
@@ -335,12 +333,12 @@ class ExcludePathsTest(unittest.TestCase):
# Dockerignore reference stipulates that absolute paths are
# equivalent to relative paths, hence /../foo should be
# equivalent to ../foo. It also stipulates that paths are run
- # through Go's filepath.Clean, which explicitely "replace
+ # through Go's filepath.Clean, which explicitly "replace
# "/.." by "/" at the beginning of a path".
assert exclude_paths(
base,
['../a.py', '/../b.py']
- ) == set(['c.py'])
+ ) == {'c.py'}
class TarTest(unittest.TestCase):
@@ -374,14 +372,14 @@ class TarTest(unittest.TestCase):
'.dockerignore',
]
- expected_names = set([
+ expected_names = {
'Dockerfile',
'.dockerignore',
'a.go',
'b.py',
'bar',
'bar/a.py',
- ])
+ }
base = make_tree(dirs, files)
self.addCleanup(shutil.rmtree, base)
@@ -413,7 +411,7 @@ class TarTest(unittest.TestCase):
with pytest.raises(IOError) as ei:
tar(base)
- assert 'Can not read file in context: {}'.format(full_path) in (
+ assert f'Can not read file in context: {full_path}' in (
ei.exconly()
)
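As a reminder of the semantics these exclusion tests encode: .dockerignore patterns apply in order, the last matching pattern wins, and a leading '!' re-includes. A simplified matcher sketch (fnmatch-based; the real exclude_paths also handles directories, '**' and path normalization):

    import fnmatch

    def is_excluded(path, patterns):
        excluded = False
        for pattern in patterns:
            negate = pattern.startswith('!')
            if fnmatch.fnmatch(path, pattern.lstrip('!')):
                excluded = not negate
        return excluded

    assert is_excluded('a.py', ['*.py', '!b.py']) is True
    assert is_excluded('b.py', ['*.py', '!b.py']) is False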
diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py
index 50ba383..83e04a1 100644
--- a/tests/unit/utils_config_test.py
+++ b/tests/unit/utils_config_test.py
@@ -4,36 +4,36 @@ import shutil
import tempfile
import json
-from py.test import ensuretemp
-from pytest import mark
+from pytest import mark, fixture
+
from docker.utils import config
try:
from unittest import mock
except ImportError:
- import mock
+ from unittest import mock
class FindConfigFileTest(unittest.TestCase):
- def tmpdir(self, name):
- tmpdir = ensuretemp(name)
- self.addCleanup(tmpdir.remove)
- return tmpdir
+
+ @fixture(autouse=True)
+ def tmpdir(self, tmpdir):
+ self.mkdir = tmpdir.mkdir
def test_find_config_fallback(self):
- tmpdir = self.tmpdir('test_find_config_fallback')
+ tmpdir = self.mkdir('test_find_config_fallback')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert config.find_config_file() is None
def test_find_config_from_explicit_path(self):
- tmpdir = self.tmpdir('test_find_config_from_explicit_path')
+ tmpdir = self.mkdir('test_find_config_from_explicit_path')
config_path = tmpdir.ensure('my-config-file.json')
assert config.find_config_file(str(config_path)) == str(config_path)
def test_find_config_from_environment(self):
- tmpdir = self.tmpdir('test_find_config_from_environment')
+ tmpdir = self.mkdir('test_find_config_from_environment')
config_path = tmpdir.ensure('config.json')
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
@@ -41,7 +41,7 @@ class FindConfigFileTest(unittest.TestCase):
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_posix(self):
- tmpdir = self.tmpdir('test_find_config_from_home_posix')
+ tmpdir = self.mkdir('test_find_config_from_home_posix')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
@@ -49,7 +49,7 @@ class FindConfigFileTest(unittest.TestCase):
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_legacy_name(self):
- tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
+ tmpdir = self.mkdir('test_find_config_from_home_legacy_name')
config_path = tmpdir.ensure('.dockercfg')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
@@ -57,7 +57,7 @@ class FindConfigFileTest(unittest.TestCase):
@mark.skipif("sys.platform != 'win32'")
def test_find_config_from_home_windows(self):
- tmpdir = self.tmpdir('test_find_config_from_home_windows')
+ tmpdir = self.mkdir('test_find_config_from_home_windows')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py
index f7aefd0..821ebe4 100644
--- a/tests/unit/utils_json_stream_test.py
+++ b/tests/unit/utils_json_stream_test.py
@@ -1,11 +1,7 @@
-# encoding: utf-8
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
-class TestJsonSplitter(object):
+class TestJsonSplitter:
def test_json_splitter_no_object(self):
data = '{"foo": "bar'
@@ -20,7 +16,7 @@ class TestJsonSplitter(object):
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
-class TestStreamAsText(object):
+class TestStreamAsText:
def test_stream_with_non_utf_unicode_character(self):
stream = [b'\xed\xf3\xf3']
@@ -28,12 +24,12 @@ class TestStreamAsText(object):
assert output == '���'
def test_stream_with_utf_character(self):
- stream = ['ěĝ'.encode('utf-8')]
+ stream = ['ěĝ'.encode()]
output, = stream_as_text(stream)
assert output == 'ěĝ'
-class TestJsonStream(object):
+class TestJsonStream:
def test_with_falsy_entries(self):
stream = [
diff --git a/tests/unit/utils_proxy_test.py b/tests/unit/utils_proxy_test.py
new file mode 100644
index 0000000..2da6040
--- /dev/null
+++ b/tests/unit/utils_proxy_test.py
@@ -0,0 +1,81 @@
+import unittest
+
+from docker.utils.proxy import ProxyConfig
+
+HTTP = 'http://test:80'
+HTTPS = 'https://test:443'
+FTP = 'ftp://user:password@host:23'
+NO_PROXY = 'localhost,.localdomain'
+CONFIG = ProxyConfig(http=HTTP, https=HTTPS, ftp=FTP, no_proxy=NO_PROXY)
+ENV = {
+ 'http_proxy': HTTP,
+ 'HTTP_PROXY': HTTP,
+ 'https_proxy': HTTPS,
+ 'HTTPS_PROXY': HTTPS,
+ 'ftp_proxy': FTP,
+ 'FTP_PROXY': FTP,
+ 'no_proxy': NO_PROXY,
+ 'NO_PROXY': NO_PROXY,
+}
+
+
+class ProxyConfigTest(unittest.TestCase):
+
+ def test_from_dict(self):
+ config = ProxyConfig.from_dict({
+ 'httpProxy': HTTP,
+ 'httpsProxy': HTTPS,
+ 'ftpProxy': FTP,
+ 'noProxy': NO_PROXY
+ })
+ self.assertEqual(CONFIG.http, config.http)
+ self.assertEqual(CONFIG.https, config.https)
+ self.assertEqual(CONFIG.ftp, config.ftp)
+ self.assertEqual(CONFIG.no_proxy, config.no_proxy)
+
+ def test_new(self):
+ config = ProxyConfig()
+ self.assertIsNone(config.http)
+ self.assertIsNone(config.https)
+ self.assertIsNone(config.ftp)
+ self.assertIsNone(config.no_proxy)
+
+ config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d')
+ self.assertEqual(config.http, 'a')
+ self.assertEqual(config.https, 'b')
+ self.assertEqual(config.ftp, 'c')
+ self.assertEqual(config.no_proxy, 'd')
+
+ def test_truthiness(self):
+ assert not ProxyConfig()
+ assert ProxyConfig(http='non-zero')
+ assert ProxyConfig(https='non-zero')
+ assert ProxyConfig(ftp='non-zero')
+ assert ProxyConfig(no_proxy='non-zero')
+
+ def test_environment(self):
+ self.assertDictEqual(CONFIG.get_environment(), ENV)
+ empty = ProxyConfig()
+ self.assertDictEqual(empty.get_environment(), {})
+
+ def test_inject_proxy_environment(self):
+ # Proxy config is non null, env is None.
+ self.assertSetEqual(
+ set(CONFIG.inject_proxy_environment(None)),
+ {f'{k}={v}' for k, v in ENV.items()})
+
+ # Proxy config is null, env is None.
+ self.assertIsNone(ProxyConfig().inject_proxy_environment(None))
+
+ env = ['FOO=BAR', 'BAR=BAZ']
+
+ # Proxy config is non null, env is non null
+ actual = CONFIG.inject_proxy_environment(env)
+ expected = [f'{k}={v}' for k, v in ENV.items()] + env
+ # It's important that the first 8 variables are the ones from the proxy
+ # config, and the last 2 are the ones from the input environment
+ self.assertSetEqual(set(actual[:8]), set(expected[:8]))
+ self.assertSetEqual(set(actual[-2:]), set(expected[-2:]))
+
+ # Proxy config is null, env is non null
+ self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env)
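What these assertions encode, as a sketch: injection prepends the proxy variables in KEY=value form and appends the caller's environment unchanged, returning the input untouched when the proxy config is empty (hypothetical helper, assuming the get_environment() method used above):

    def inject(proxy_config, environment):
        proxy_vars = [f'{k}={v}' for k, v in proxy_config.get_environment().items()]
        if not proxy_vars:
            return environment
        return proxy_vars + (environment or [])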
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 8880cfe..802d919 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -1,30 +1,22 @@
-# -*- coding: utf-8 -*-
-
import base64
import json
import os
import os.path
import shutil
-import sys
import tempfile
import unittest
-
+import pytest
from docker.api.client import APIClient
+from docker.constants import IS_WINDOWS_PLATFORM, DEFAULT_DOCKER_API_VERSION
from docker.errors import DockerException
-from docker.utils import (
- convert_filters, convert_volume_binds, decode_json_header, kwargs_from_env,
- parse_bytes, parse_devices, parse_env_file, parse_host,
- parse_repository_tag, split_command, update_headers,
-)
-
+from docker.utils import (convert_filters, convert_volume_binds,
+ decode_json_header, kwargs_from_env, parse_bytes,
+ parse_devices, parse_env_file, parse_host,
+ parse_repository_tag, split_command, update_headers)
from docker.utils.ports import build_port_bindings, split_port
from docker.utils.utils import format_environment
-import pytest
-
-import six
-
TEST_CERT_DIR = os.path.join(
os.path.dirname(__file__),
'testdata/certs',
@@ -40,7 +32,7 @@ class DecoratorsTest(unittest.TestCase):
def f(self, headers=None):
return headers
- client = APIClient()
+ client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
client._general_configs = {}
g = update_headers(f)
@@ -83,15 +75,18 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
- assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
assert 'ca.pem' in kwargs['tls'].ca_cert
assert 'cert.pem' in kwargs['tls'].cert[0]
assert 'key.pem' in kwargs['tls'].cert[1]
assert kwargs['tls'].assert_hostname is False
assert kwargs['tls'].verify
+
+ parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
+ kwargs['version'] = DEFAULT_DOCKER_API_VERSION
try:
client = APIClient(**kwargs)
- assert kwargs['base_url'] == client.base_url
+ assert parsed_host == client.base_url
assert kwargs['tls'].ca_cert == client.verify
assert kwargs['tls'].cert == client.cert
except TypeError as e:
@@ -102,15 +97,17 @@ class KwargsFromEnvTest(unittest.TestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env(assert_hostname=True)
- assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
assert 'ca.pem' in kwargs['tls'].ca_cert
assert 'cert.pem' in kwargs['tls'].cert[0]
assert 'key.pem' in kwargs['tls'].cert[1]
assert kwargs['tls'].assert_hostname is True
assert kwargs['tls'].verify is False
+ parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
+ kwargs['version'] = DEFAULT_DOCKER_API_VERSION
try:
client = APIClient(**kwargs)
- assert kwargs['base_url'] == client.base_url
+ assert parsed_host == client.base_url
assert kwargs['tls'].cert == client.cert
assert not kwargs['tls'].verify
except TypeError as e:
@@ -195,22 +192,22 @@ class ConverVolumeBindsTest(unittest.TestCase):
assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
def test_convert_volume_binds_unicode_bytes_input(self):
- expected = [u'/mnt/지연:/unicode/박:rw']
+ expected = ['/mnt/지연:/unicode/박:rw']
data = {
- u'/mnt/지연'.encode('utf-8'): {
- 'bind': u'/unicode/박'.encode('utf-8'),
+ '/mnt/지연'.encode(): {
+ 'bind': '/unicode/박'.encode(),
'mode': 'rw'
}
}
assert convert_volume_binds(data) == expected
def test_convert_volume_binds_unicode_unicode_input(self):
- expected = [u'/mnt/지연:/unicode/박:rw']
+ expected = ['/mnt/지연:/unicode/박:rw']
data = {
- u'/mnt/지연': {
- 'bind': u'/unicode/박',
+ '/mnt/지연': {
+ 'bind': '/unicode/박',
'mode': 'rw'
}
}
@@ -272,6 +269,11 @@ class ParseHostTest(unittest.TestCase):
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
+ 'ssh://:22/path',
+ 'tcp://netloc:3333/path?q=1',
+ 'unix:///sock/path#fragment',
+ 'https://netloc:3333/path;params',
+ 'ssh://:clearpassword@host:22',
]
valid_hosts = {
@@ -281,7 +283,7 @@ class ParseHostTest(unittest.TestCase):
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
- 'unix://': 'http+unix://var/run/docker.sock',
+ 'unix://': 'http+unix:///var/run/docker.sock',
'12.234.45.127:2375/docker/engine': (
'http://12.234.45.127:2375/docker/engine'
),
@@ -294,6 +296,9 @@ class ParseHostTest(unittest.TestCase):
'[fd12::82d1]:2375/docker/engine': (
'http://[fd12::82d1]:2375/docker/engine'
),
+ 'ssh://': 'ssh://127.0.0.1:22',
+ 'ssh://user@localhost:22': 'ssh://user@localhost:22',
+ 'ssh://user@remote': 'ssh://user@remote:22',
}
for host in invalid_hosts:
@@ -304,7 +309,7 @@ class ParseHostTest(unittest.TestCase):
assert parse_host(host, None) == expected
def test_parse_host_empty_value(self):
- unix_socket = 'http+unix://var/run/docker.sock'
+ unix_socket = 'http+unix:///var/run/docker.sock'
npipe = 'npipe:////./pipe/docker_engine'
for val in [None, '']:
@@ -351,14 +356,14 @@ class ParseRepositoryTagTest(unittest.TestCase):
)
def test_index_image_sha(self):
- assert parse_repository_tag("root@sha256:{0}".format(self.sha)) == (
- "root", "sha256:{0}".format(self.sha)
+ assert parse_repository_tag(f"root@sha256:{self.sha}") == (
+ "root", f"sha256:{self.sha}"
)
def test_private_reg_image_sha(self):
assert parse_repository_tag(
- "url:5000/repo@sha256:{0}".format(self.sha)
- ) == ("url:5000/repo", "sha256:{0}".format(self.sha))
+ f"url:5000/repo@sha256:{self.sha}"
+ ) == ("url:5000/repo", f"sha256:{self.sha}")
class ParseDeviceTest(unittest.TestCase):
@@ -435,11 +440,7 @@ class ParseBytesTest(unittest.TestCase):
parse_bytes("127.0.0.1K")
def test_parse_bytes_float(self):
- with pytest.raises(DockerException):
- parse_bytes("1.5k")
-
- def test_parse_bytes_maxint(self):
- assert parse_bytes("{0}k".format(sys.maxsize)) == sys.maxsize * 1024
+ assert parse_bytes("1.5k") == 1536
class UtilsTest(unittest.TestCase):
@@ -449,8 +450,8 @@ class UtilsTest(unittest.TestCase):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
- ({'exited': 0}, '{"exited": [0]}'),
- ({'exited': [0, 1]}, '{"exited": [0, 1]}'),
+ ({'exited': 0}, '{"exited": ["0"]}'),
+ ({'exited': [0, 1]}, '{"exited": ["0", "1"]}'),
]
for filters, expected in tests:
@@ -459,20 +460,13 @@ class UtilsTest(unittest.TestCase):
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
data = None
- if six.PY3:
- data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
- else:
- data = base64.urlsafe_b64encode(json.dumps(obj))
+ data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
decoded_data = decode_json_header(data)
assert obj == decoded_data
class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
- assert split_command(u'echo μμ') == ['echo', 'μμ']
-
- @pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
- def test_split_command_with_bytes(self):
assert split_command('echo μμ') == ['echo', 'μμ']
@@ -483,9 +477,12 @@ class PortsTest(unittest.TestCase):
assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_protocol(self):
- internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
- assert internal_port == ["2000/udp"]
- assert external_port == [("127.0.0.1", "1000")]
+ for protocol in ['tcp', 'udp', 'sctp']:
+ internal_port, external_port = split_port(
+ "127.0.0.1:1000:2000/" + protocol
+ )
+ assert internal_port == ["2000/" + protocol]
+ assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
@@ -534,10 +531,20 @@ class PortsTest(unittest.TestCase):
assert internal_port == ["2000"]
assert external_port == [("2001:abcd:ef00::2", "1000")]
+ def test_split_port_with_ipv6_square_brackets_address(self):
+ internal_port, external_port = split_port(
+ "[2001:abcd:ef00::2]:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
+
def test_split_port_invalid(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000:2000:tcp")
+ def test_split_port_invalid_protocol(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000/ftp")
+
def test_non_matching_length_port_ranges(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000-1010:2000-2002/tcp")
@@ -609,7 +616,7 @@ class FormatEnvironmentTest(unittest.TestCase):
env_dict = {
'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'
}
- assert format_environment(env_dict) == [u'ARTIST_NAME=송지은']
+ assert format_environment(env_dict) == ['ARTIST_NAME=송지은']
def test_format_env_no_value(self):
env_dict = {