-rw-r--r--  PKG-INFO | 21
-rw-r--r--  README.md | 4
-rw-r--r--  debian/changelog | 42
-rw-r--r--  debian/clean | 1
-rw-r--r--  debian/control | 20
-rw-r--r--  debian/patches/requirements.patch | 55
-rwxr-xr-x  debian/rules | 6
-rw-r--r--  debian/tests/control | 6
-rw-r--r--  docker.egg-info/PKG-INFO | 21
-rw-r--r--  docker.egg-info/SOURCES.txt | 11
-rw-r--r--  docker.egg-info/requires.txt | 16
-rw-r--r--  docker/__init__.py | 3
-rw-r--r--  docker/api/build.py | 12
-rw-r--r--  docker/api/client.py | 103
-rw-r--r--  docker/api/config.py | 15
-rw-r--r--  docker/api/container.py | 88
-rw-r--r--  docker/api/daemon.py | 4
-rw-r--r--  docker/api/exec_api.py | 6
-rw-r--r--  docker/api/image.py | 52
-rw-r--r--  docker/api/network.py | 11
-rw-r--r--  docker/api/plugin.py | 6
-rw-r--r--  docker/api/secret.py | 7
-rw-r--r--  docker/api/service.py | 4
-rw-r--r--  docker/api/swarm.py | 14
-rw-r--r--  docker/api/volume.py | 8
-rw-r--r--  docker/auth.py | 38
-rw-r--r--  docker/client.py | 26
-rw-r--r--  docker/constants.py | 18
-rw-r--r--  docker/context/__init__.py | 3
-rw-r--r--  docker/context/api.py | 203
-rw-r--r--  docker/context/config.py | 81
-rw-r--r--  docker/context/context.py | 243
-rw-r--r--  docker/credentials/store.py | 27
-rw-r--r--  docker/errors.py | 58
-rw-r--r--  docker/models/configs.py | 2
-rw-r--r--  docker/models/containers.py | 22
-rw-r--r--  docker/models/images.py | 40
-rw-r--r--  docker/models/networks.py | 2
-rw-r--r--  docker/models/plugins.py | 7
-rw-r--r--  docker/models/resource.py | 9
-rw-r--r--  docker/models/secrets.py | 3
-rw-r--r--  docker/models/services.py | 9
-rw-r--r--  docker/models/swarm.py | 2
-rw-r--r--  docker/tls.py | 12
-rw-r--r--  docker/transport/basehttpadapter.py | 2
-rw-r--r--  docker/transport/npipeconn.py | 27
-rw-r--r--  docker/transport/npipesocket.py | 20
-rw-r--r--  docker/transport/sshconn.py | 201
-rw-r--r--  docker/transport/ssladapter.py | 4
-rw-r--r--  docker/transport/unixconn.py | 42
-rw-r--r--  docker/types/__init__.py | 4
-rw-r--r--  docker/types/base.py | 5
-rw-r--r--  docker/types/containers.py | 171
-rw-r--r--  docker/types/daemon.py | 4
-rw-r--r--  docker/types/healthcheck.py | 8
-rw-r--r--  docker/types/networks.py | 11
-rw-r--r--  docker/types/services.py | 45
-rw-r--r--  docker/utils/build.py | 30
-rw-r--r--  docker/utils/config.py | 6
-rw-r--r--  docker/utils/decorators.py | 2
-rw-r--r--  docker/utils/fnmatch.py | 2
-rw-r--r--  docker/utils/json_stream.py | 13
-rw-r--r--  docker/utils/ports.py | 4
-rw-r--r--  docker/utils/socket.py | 14
-rw-r--r--  docker/utils/utils.py | 90
-rw-r--r--  docker/version.py | 4
-rw-r--r--  requirements.txt | 12
-rw-r--r--  setup.py | 26
-rw-r--r--  test-requirements.txt | 1
-rw-r--r--  tests/helpers.py | 11
-rw-r--r--  tests/integration/api_build_test.py | 20
-rw-r--r--  tests/integration/api_client_test.py | 2
-rw-r--r--  tests/integration/api_config_test.py | 17
-rw-r--r--  tests/integration/api_container_test.py | 108
-rw-r--r--  tests/integration/api_exec_test.py | 2
-rw-r--r--  tests/integration/api_image_test.py | 21
-rw-r--r--  tests/integration/api_network_test.py | 23
-rw-r--r--  tests/integration/api_secret_test.py | 4
-rw-r--r--  tests/integration/api_service_test.py | 78
-rw-r--r--  tests/integration/api_swarm_test.py | 4
-rw-r--r--  tests/integration/base.py | 4
-rw-r--r--  tests/integration/conftest.py | 6
-rw-r--r--  tests/integration/context_api_test.py | 59
-rw-r--r--  tests/integration/credentials/store_test.py | 7
-rw-r--r--  tests/integration/credentials/utils_test.py | 2
-rw-r--r--  tests/integration/models_images_test.py | 30
-rw-r--r--  tests/integration/models_services_test.py | 38
-rw-r--r--  tests/integration/regression_test.py | 10
-rw-r--r--  tests/ssh/__init__.py | 0
-rw-r--r--  tests/ssh/api_build_test.py | 590
-rw-r--r--  tests/ssh/base.py | 130
-rw-r--r--  tests/unit/api_container_test.py | 89
-rw-r--r--  tests/unit/api_exec_test.py | 10
-rw-r--r--  tests/unit/api_image_test.py | 25
-rw-r--r--  tests/unit/api_network_test.py | 24
-rw-r--r--  tests/unit/api_test.py | 93
-rw-r--r--  tests/unit/api_volume_test.py | 4
-rw-r--r--  tests/unit/auth_test.py | 22
-rw-r--r--  tests/unit/client_test.py | 188
-rw-r--r--  tests/unit/context_test.py | 49
-rw-r--r--  tests/unit/dockertypes_test.py | 4
-rw-r--r--  tests/unit/errors_test.py | 22
-rw-r--r--  tests/unit/fake_api.py | 111
-rw-r--r--  tests/unit/fake_api_client.py | 13
-rw-r--r--  tests/unit/models_containers_test.py | 4
-rw-r--r--  tests/unit/models_images_test.py | 29
-rw-r--r--  tests/unit/models_resources_test.py | 2
-rw-r--r--  tests/unit/models_secrets_test.py | 11
-rw-r--r--  tests/unit/models_services_test.py | 10
-rw-r--r--  tests/unit/sshadapter_test.py | 39
-rw-r--r--  tests/unit/ssladapter_test.py | 38
-rw-r--r--  tests/unit/swarm_test.py | 2
-rw-r--r--  tests/unit/utils_build_test.py | 114
-rw-r--r--  tests/unit/utils_config_test.py | 2
-rw-r--r--  tests/unit/utils_json_stream_test.py | 12
-rw-r--r--  tests/unit/utils_proxy_test.py | 7
-rw-r--r--  tests/unit/utils_test.py | 68
117 files changed, 3190 insertions, 1057 deletions
diff --git a/PKG-INFO b/PKG-INFO
index 8754725..58deb13 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,18 +1,18 @@
Metadata-Version: 2.1
Name: docker
-Version: 4.1.0
+Version: 5.0.3
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
-Maintainer: Joffrey F
-Maintainer-email: joffrey@docker.com
+Maintainer: Ulysses Souza
+Maintainer-email: ulysses.souza@docker.com
License: Apache License 2.0
-Project-URL: Source, https://github.com/docker/docker-py
Project-URL: Documentation, https://docker-py.readthedocs.io
-Project-URL: Tracker, https://github.com/docker/docker-py/issues
Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
Description: # Docker SDK for Python
- [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
+ [![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
@@ -70,7 +70,7 @@ Description: # Docker SDK for Python
```python
>>> for line in container.logs(stream=True):
- ... print line.strip()
+ ... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
@@ -94,16 +94,15 @@ Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Requires-Python: >=3.6
Description-Content-Type: text/markdown
Provides-Extra: tls
Provides-Extra: ssh
diff --git a/README.md b/README.md
index 3ff124d..4fc31f7 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# Docker SDK for Python
-[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
@@ -58,7 +58,7 @@ You can stream logs:
```python
>>> for line in container.logs(stream=True):
-... print line.strip()
+... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
diff --git a/debian/changelog b/debian/changelog
index a20fc93..68b859b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,45 @@
+python-docker (5.0.3-1) unstable; urgency=medium
+
+ [ Andrej Shadura ]
+ * New upstream release.
+ * Refresh the patch.
+ * Drop outdated dependency on python3-six.
+ * Build-depend on dh-sequence-python3 instead of --with python3.
+ * Clean eggs in debian/clean.
+
+ [ Anthony Fok ]
+ * Remove dependency on python3-dockerpycreds.
+
+ -- Andrej Shadura <andrewsh@debian.org> Tue, 12 Oct 2021 10:36:52 +0200
+
+python-docker (4.4.4-1) unstable; urgency=medium
+
+ * Upload to unstable.
+ * Add myself as uploader.
+
+ -- Thomas Goirand <zigo@debian.org> Mon, 16 Aug 2021 13:12:56 +0200
+
+python-docker (4.4.4-0.1) experimental; urgency=medium
+
+ * Non-maintainer upload.
+ * New upstream release.
+ * Rebased requirements.patch.
+
+ -- Thomas Goirand <zigo@debian.org> Sat, 27 Mar 2021 19:55:48 +0100
+
+python-docker (4.1.0-1.2) unstable; urgency=medium
+
+ * Uploading source-only.
+
+ -- Thomas Goirand <zigo@debian.org> Fri, 08 May 2020 10:36:18 +0200
+
+python-docker (4.1.0-1.1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+ * Add python3-distutils runtime depends. (Closes: #958577)
+
+ -- Thomas Goirand <zigo@debian.org> Fri, 08 May 2020 10:32:27 +0200
+
python-docker (4.1.0-1) unstable; urgency=medium
* New upstream version 4.1.0
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 0000000..45149aa
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1 @@
+*.egg-info/*
diff --git a/debian/control b/debian/control
index 705241f..401b125 100644
--- a/debian/control
+++ b/debian/control
@@ -2,16 +2,17 @@ Source: python-docker
Section: python
Priority: optional
Maintainer: Docker Compose Team <team+docker-compose@tracker.debian.org>
-Uploaders: Jason Pleau <jason@jpleau.ca>,
- Felipe Sateler <fsateler@debian.org>
-Build-Depends: debhelper-compat (= 12),
- dh-python,
+Uploaders:
+ Jason Pleau <jason@jpleau.ca>,
+ Felipe Sateler <fsateler@debian.org>,
+ Thomas Goirand <zigo@debian.org>,
+Build-Depends:
+ debhelper-compat (= 12),
+ dh-sequence-python3,
python3-all,
+ python3-requests (>= 2.14.2~),
python3-setuptools,
- python3-requests (>= 2.11.1~),
- python3-six (>= 1.4.0~),
python3-websocket (>= 0.32.0~),
- python3-dockerpycreds (>= 0.2.2),
Standards-Version: 4.4.1
Homepage: https://github.com/docker/docker-py
Vcs-Git: https://salsa.debian.org/docker-compose-team/python-docker
@@ -19,7 +20,10 @@ Vcs-Browser: https://salsa.debian.org/docker-compose-team/python-docker
Package: python3-docker
Architecture: all
-Depends: ${misc:Depends}, ${python3:Depends}
+Depends:
+ python3-distutils,
+ ${misc:Depends},
+ ${python3:Depends},
Description: Python 3 wrapper to access docker.io's control socket
This package contains oodles of routines that aid in controlling
docker.io over its socket control, the same way the docker.io
diff --git a/debian/patches/requirements.patch b/debian/patches/requirements.patch
index f8e9195..e018d09 100644
--- a/debian/patches/requirements.patch
+++ b/debian/patches/requirements.patch
@@ -6,20 +6,21 @@ Forwarded: https://github.com/dotcloud/docker-py/issues/101 (upstream has no int
Patch-Name: requirements.patch
---
- requirements.txt | 37 +++++++++++++++++++------------------
- test-requirements.txt | 12 ++++++------
- 2 files changed, 25 insertions(+), 24 deletions(-)
+ requirements.txt | 33 ++++++++++++++++-----------------
+ setup.py | 2 +-
+ test-requirements.txt | 14 +++++++-------
+ 3 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/requirements.txt b/requirements.txt
-index 804a78a..95d8fa6 100644
+index 26cbc6f..308ac48 100644
--- a/requirements.txt
+++ b/requirements.txt
-@@ -1,19 +1,20 @@
+@@ -1,17 +1,16 @@
-appdirs==1.4.3
-asn1crypto==0.22.0
-backports.ssl-match-hostname==3.5.0.1
--cffi==1.10.0
--cryptography==2.3
+-cffi==1.14.4
+-cryptography==3.4.7
-enum34==1.1.6
-idna==2.5
-ipaddress==1.0.18
@@ -28,12 +29,15 @@ index 804a78a..95d8fa6 100644
-pycparser==2.17
-pyOpenSSL==18.0.0
-pyparsing==2.2.0
--pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
+-pywin32==301; sys_platform == 'win32'
+-requests==2.26.0
+-urllib3==1.26.5
+-websocket-client==0.56.0
+appdirs>=1.4.3
+asn1crypto>=0.22.0
+backports.ssl-match-hostname>=3.5.0.1
-+cffi>=1.10.0
-+cryptography>=2.3
++cffi>=1.14.4
++cryptography>=3.2
+enum34>=1.1.6
+idna>=2.5
+ipaddress>=1.0.18
@@ -42,28 +46,35 @@ index 804a78a..95d8fa6 100644
+pycparser>=2.17
+pyOpenSSL>=18.0.0
+pyparsing>=2.2.0
-+pypiwin32>=219; sys_platform == 'win32' and python_version < '3.6'
- pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
--requests==2.20.0
--six==1.10.0
--urllib3==1.24.3
--websocket-client==0.56.0
-+requests>=2.20.2
-+six>=1.10.0
-+websocket-client>=0.40.0
-+urllib3>=1.24.3
++requests>=2.25.0
++urllib3>=1.26.5
+websocket-client>=0.56.0
+diff --git a/setup.py b/setup.py
+index a966fea..5b9945d 100644
+--- a/setup.py
++++ b/setup.py
+@@ -26,7 +26,7 @@ extras_require = {
+ # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
+ # installing the extra dependencies, install the following instead:
+ # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
+- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],
++ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.2', 'idna>=2.0.0'],
+
+ # Only required when connecting using the ssh:// protocol
+ 'ssh': ['paramiko>=2.4.2'],
diff --git a/test-requirements.txt b/test-requirements.txt
-index 0b01e56..ebc74f2 100644
+index 40161bb..585e3fd 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
-@@ -1,6 +1,6 @@
+@@ -1,7 +1,7 @@
+-setuptools==54.1.1
-coverage==4.5.2
-flake8==3.6.0
-mock==1.0.1
-pytest==4.3.1
-pytest-cov==2.6.1
-pytest-timeout==1.3.3
++setuptools>=54.1.1
+coverage>=4.5.2
+flake8>=3.6.0
+mock>=1.0.1
diff --git a/debian/rules b/debian/rules
index 9ffd4ee..ae44baf 100755
--- a/debian/rules
+++ b/debian/rules
@@ -5,9 +5,5 @@ export PYBUILD_NAME=docker
# The pypi bundle includes the tests (https://github.com/docker/docker-py/issues/308), but they're really invasive and fail for reasons I haven't yet diagnosed.
export PYBUILD_DISABLE=test
-override_dh_auto_clean:
- dh_auto_clean
- rm -f docker_py.egg-info/SOURCES.txt
-
%:
- dh $@ --with python3 --buildsystem pybuild
+ dh $@ --buildsystem pybuild
diff --git a/debian/tests/control b/debian/tests/control
index 6163e1b..a94503d 100644
--- a/debian/tests/control
+++ b/debian/tests/control
@@ -1,3 +1,7 @@
Tests: integration
-Depends: docker.io, python3-mock, python3-pytest, @
+Depends:
+ docker.io,
+ python3-mock,
+ python3-pytest,
+ @,
Restrictions: isolation-machine needs-root
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
index 8754725..58deb13 100644
--- a/docker.egg-info/PKG-INFO
+++ b/docker.egg-info/PKG-INFO
@@ -1,18 +1,18 @@
Metadata-Version: 2.1
Name: docker
-Version: 4.1.0
+Version: 5.0.3
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
-Maintainer: Joffrey F
-Maintainer-email: joffrey@docker.com
+Maintainer: Ulysses Souza
+Maintainer-email: ulysses.souza@docker.com
License: Apache License 2.0
-Project-URL: Source, https://github.com/docker/docker-py
Project-URL: Documentation, https://docker-py.readthedocs.io
-Project-URL: Tracker, https://github.com/docker/docker-py/issues
Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
Description: # Docker SDK for Python
- [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
+ [![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/docker/docker-py/actions/workflows/ci.yml/)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
@@ -70,7 +70,7 @@ Description: # Docker SDK for Python
```python
>>> for line in container.logs(stream=True):
- ... print line.strip()
+ ... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
@@ -94,16 +94,15 @@ Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Requires-Python: >=3.6
Description-Content-Type: text/markdown
Provides-Extra: tls
Provides-Extra: ssh
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
index 6daee4c..0e0320f 100644
--- a/docker.egg-info/SOURCES.txt
+++ b/docker.egg-info/SOURCES.txt
@@ -32,6 +32,10 @@ docker/api/secret.py
docker/api/service.py
docker/api/swarm.py
docker/api/volume.py
+docker/context/__init__.py
+docker/context/api.py
+docker/context/config.py
+docker/context/context.py
docker/credentials/__init__.py
docker/credentials/constants.py
docker/credentials/errors.py
@@ -95,6 +99,7 @@ tests/integration/api_volume_test.py
tests/integration/base.py
tests/integration/client_test.py
tests/integration/conftest.py
+tests/integration/context_api_test.py
tests/integration/errors_test.py
tests/integration/models_containers_test.py
tests/integration/models_images_test.py
@@ -110,6 +115,9 @@ tests/integration/credentials/store_test.py
tests/integration/credentials/utils_test.py
tests/integration/testdata/dummy-plugin/config.json
tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
+tests/ssh/__init__.py
+tests/ssh/api_build_test.py
+tests/ssh/base.py
tests/unit/__init__.py
tests/unit/api_build_test.py
tests/unit/api_container_test.py
@@ -120,6 +128,7 @@ tests/unit/api_test.py
tests/unit/api_volume_test.py
tests/unit/auth_test.py
tests/unit/client_test.py
+tests/unit/context_test.py
tests/unit/dockertypes_test.py
tests/unit/errors_test.py
tests/unit/fake_api.py
@@ -129,7 +138,9 @@ tests/unit/models_containers_test.py
tests/unit/models_images_test.py
tests/unit/models_networks_test.py
tests/unit/models_resources_test.py
+tests/unit/models_secrets_test.py
tests/unit/models_services_test.py
+tests/unit/sshadapter_test.py
tests/unit/ssladapter_test.py
tests/unit/swarm_test.py
tests/unit/types_containers_test.py
diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt
index 415d2e6..06622f8 100644
--- a/docker.egg-info/requires.txt
+++ b/docker.egg-info/requires.txt
@@ -1,23 +1,13 @@
-six>=1.4.0
websocket-client>=0.32.0
requests!=2.18.0,>=2.14.2
-[:python_version < "3.3"]
-ipaddress>=1.0.16
-
-[:python_version < "3.5"]
-backports.ssl_match_hostname>=3.5
-
-[:sys_platform == "win32" and python_version < "3.6"]
-pypiwin32==219
-
-[:sys_platform == "win32" and python_version >= "3.6"]
-pypiwin32==223
+[:sys_platform == "win32"]
+pywin32==227
[ssh]
paramiko>=2.4.2
[tls]
pyOpenSSL>=17.5.0
-cryptography>=1.3.4
+cryptography>=3.4.7
idna>=2.0.0
diff --git a/docker/__init__.py b/docker/__init__.py
index cf732e1..e5c1a8f 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,6 +1,9 @@
# flake8: noqa
from .api import APIClient
from .client import DockerClient, from_env
+from .context import Context
+from .context import ContextAPI
+from .tls import TLSConfig
from .version import version, version_info
__version__ = version
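The `docker/__init__.py` hunk above promotes `Context`, `ContextAPI` and `TLSConfig` to top-level exports. A minimal sketch of what that enables; note that `ContextAPI.contexts()` is an assumption based on `docker/context/api.py`, which appears in the diffstat but whose hunks are not shown in this excerpt:

```python
# These names are now importable from the package root; previously they
# had to be imported from docker.context and docker.tls directly.
from docker import Context, ContextAPI, TLSConfig

# Assumed classmethod from docker/context/api.py: list known contexts.
for ctx in ContextAPI.contexts():
    print(ctx)
```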
diff --git a/docker/api/build.py b/docker/api/build.py
index 365129a..aac43c4 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -12,7 +12,7 @@ from .. import utils
log = logging.getLogger(__name__)
-class BuildApiMixin(object):
+class BuildApiMixin:
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
@@ -132,7 +132,7 @@ class BuildApiMixin(object):
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
- 'Invalid container_limits key {0}'.format(key)
+ f'Invalid container_limits key {key}'
)
if custom_context:
@@ -150,7 +150,7 @@ class BuildApiMixin(object):
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
- with open(dockerignore, 'r') as f:
+ with open(dockerignore) as f:
exclude = list(filter(
lambda x: x != '' and x[0] != '#',
[l.strip() for l in f.read().splitlines()]
@@ -313,7 +313,7 @@ class BuildApiMixin(object):
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
- 'Sending auth config ({0})'.format(
+ 'Sending auth config ({})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
@@ -344,9 +344,9 @@ def process_dockerfile(dockerfile, path):
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
- with open(abs_dockerfile, 'r') as df:
+ with open(abs_dockerfile) as df:
return (
- '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ f'.dockerfile.{random.getrandbits(160):x}',
df.read()
)
diff --git a/docker/api/client.py b/docker/api/client.py
index 35dc84e..2667922 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -1,12 +1,25 @@
import json
import struct
+import urllib
from functools import partial
import requests
import requests.exceptions
-import six
import websocket
+from .. import auth
+from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
+ DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES)
+from ..errors import (DockerException, InvalidVersion, TLSParameterError,
+ create_api_error_from_http_exception)
+from ..tls import TLSConfig
+from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
+from ..utils import check_resource, config, update_headers, utils
+from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
@@ -19,22 +32,7 @@ from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
-from .. import auth
-from ..constants import (
- DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
- DEFAULT_DOCKER_API_VERSION, MINIMUM_DOCKER_API_VERSION,
- STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS_SSH, DEFAULT_NUM_POOLS
-)
-from ..errors import (
- DockerException, InvalidVersion, TLSParameterError,
- create_api_error_from_http_exception
-)
-from ..tls import TLSConfig
-from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
-from ..utils import utils, check_resource, update_headers, config
-from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor
-from ..utils.json_stream import json_stream
-from ..utils.proxy import ProxyConfig
+
try:
from ..transport import NpipeHTTPAdapter
except ImportError:
@@ -91,6 +89,11 @@ class APIClient(
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
@@ -102,8 +105,9 @@ class APIClient(
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=None,
- credstore_env=None):
- super(APIClient, self).__init__()
+ credstore_env=None, use_ssh_client=False,
+ max_pool_size=DEFAULT_MAX_POOL_SIZE):
+ super().__init__()
if tls and not base_url:
raise TLSParameterError(
@@ -138,7 +142,8 @@ class APIClient(
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixHTTPAdapter(
- base_url, timeout, pool_connections=num_pools
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
@@ -152,7 +157,8 @@ class APIClient(
)
try:
self._custom_adapter = NpipeHTTPAdapter(
- base_url, timeout, pool_connections=num_pools
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
)
except NameError:
raise DockerException(
@@ -163,7 +169,8 @@ class APIClient(
elif base_url.startswith('ssh://'):
try:
self._custom_adapter = SSHHTTPAdapter(
- base_url, timeout, pool_connections=num_pools
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size, shell_out=use_ssh_client
)
except NameError:
raise DockerException(
@@ -183,16 +190,16 @@ class APIClient(
self.base_url = base_url
# version detection needs to be after unix adapter mounting
- if version is None:
- self._version = DEFAULT_DOCKER_API_VERSION
- elif isinstance(version, six.string_types):
- if version.lower() == 'auto':
- self._version = self._retrieve_server_version()
- else:
- self._version = version
+ if version is None or (isinstance(
+ version,
+ str
+ ) and version.lower() == 'auto'):
+ self._version = self._retrieve_server_version()
else:
+ self._version = version
+ if not isinstance(self._version, str):
raise DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
+ 'Version parameter must be a string or None. Found {}'.format(
type(version).__name__
)
)
@@ -212,7 +219,7 @@ class APIClient(
)
except Exception as e:
raise DockerException(
- 'Error while fetching server API version: {0}'.format(e)
+ f'Error while fetching server API version: {e}'
)
def _set_request_timeout(self, kwargs):
@@ -239,21 +246,21 @@ class APIClient(
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
- if not isinstance(arg, six.string_types):
+ if not isinstance(arg, str):
raise ValueError(
- 'Expected a string but found {0} ({1}) '
+ 'Expected a string but found {} ({}) '
'instead'.format(arg, type(arg))
)
- quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+ quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
+ return '{}/v{}{}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+ return f'{self.base_url}{pathfmt.format(*args)}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
@@ -277,7 +284,7 @@ class APIClient(
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
- for k, v in six.iteritems(data):
+ for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
@@ -313,12 +320,10 @@ class APIClient(
sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'):
sock = response.raw._fp.fp.channel
- elif six.PY3:
+ else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
@@ -336,8 +341,7 @@ class APIClient(
if response.raw._fp.chunked:
if decode:
- for chunk in json_stream(self._stream_helper(response, False)):
- yield chunk
+ yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
@@ -393,8 +397,13 @@ class APIClient(
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
- for out in response.iter_content(chunk_size, decode):
- yield out
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ yield from response.iter_content(chunk_size, decode)
def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
@@ -458,7 +467,7 @@ class APIClient(
self._result(res, binary=True)
self._raise_for_status(res)
- sep = six.binary_type()
+ sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
@@ -472,7 +481,7 @@ class APIClient(
def get_adapter(self, url):
try:
- return super(APIClient, self).get_adapter(url)
+ return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
@@ -490,7 +499,7 @@ class APIClient(
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
- otherwise``$HOME/.dockercfg``)
+ otherwise ``$HOME/.dockercfg``)
Returns:
None
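The `docker/api/client.py` hunks above add two constructor keywords, `use_ssh_client` and `max_pool_size`, and make `version=None` auto-detect the server version instead of falling back to a pinned default. A minimal sketch using the new arguments; the `ssh://` address is a placeholder:

```python
import docker

client = docker.APIClient(
    base_url='ssh://user@example-host',  # placeholder daemon address
    version=None,            # None now auto-detects, same as 'auto'
    use_ssh_client=True,     # shell out to the local `ssh` binary
    max_pool_size=10,        # cap on connections kept in the pool
)
print(client.version())
```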
diff --git a/docker/api/config.py b/docker/api/config.py
index 93e5168..88c367e 100644
--- a/docker/api/config.py
+++ b/docker/api/config.py
@@ -1,13 +1,11 @@
import base64
-import six
-
from .. import utils
-class ConfigApiMixin(object):
+class ConfigApiMixin:
@utils.minimum_version('1.30')
- def create_config(self, name, data, labels=None):
+ def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
@@ -15,6 +13,9 @@ class ConfigApiMixin(object):
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
+ templating (dict): dictionary containing the name of the
+ templating driver to be used expressed as
+ { name: <templating_driver_name>}
Returns (dict): ID of the newly created config
"""
@@ -22,12 +23,12 @@ class ConfigApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
- 'Labels': labels
+ 'Labels': labels,
+ 'Templating': templating
}
url = self._url('/configs/create')
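The `create_config` change above adds a `templating` parameter. A sketch under the assumption of a running Swarm manager; the diff only documents the `{"name": <templating_driver_name>}` shape, so treat the `'golang'` value as an assumption:

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')

# Create a config whose payload is expanded by a templating driver.
config_id = client.create_config(
    name='app-config',
    data=b'listen = {{ .Service.Name }}',
    labels={'env': 'demo'},
    templating={'name': 'golang'},  # assumed driver name
)
print(config_id)
```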
diff --git a/docker/api/container.py b/docker/api/container.py
index 45bd352..83fcd4f 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,7 +1,5 @@
from datetime import datetime
-import six
-
from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
@@ -12,7 +10,7 @@ from ..types import HostConfig
from ..types import NetworkingConfig
-class ContainerApiMixin(object):
+class ContainerApiMixin:
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False, demux=False):
@@ -244,9 +242,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[1111, 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
1111: 4567,
2222: None
})
@@ -258,22 +256,22 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
Or without host port assignment:
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config:
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None
})
)
@@ -283,7 +281,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [1234, 4567]
})
@@ -291,7 +289,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [
('192.168.0.100', 1234),
('192.168.0.101', 1234)
@@ -307,9 +305,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds={
+ host_config=client.api.create_host_config(binds={
'/home/user1/': {
'bind': '/mnt/vol2',
'mode': 'rw',
@@ -326,9 +324,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds=[
+ host_config=client.api.create_host_config(binds=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
])
@@ -346,15 +344,15 @@ class ContainerApiMixin(object):
.. code-block:: python
- networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config(
+ networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config(
ipv4_address='172.28.0.124',
aliases=['foo', 'bar'],
links=['container2']
)
})
- ctnr = docker_client.create_container(
+ ctnr = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -408,7 +406,7 @@ class ContainerApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(environment, dict):
@@ -480,6 +478,9 @@ class ContainerApiMixin(object):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
@@ -503,7 +504,7 @@ class ContainerApiMixin(object):
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
- mem_reservation (int or str): Memory soft limit.
+ mem_reservation (float or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -520,6 +521,8 @@ class ContainerApiMixin(object):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+ This mode is incompatible with ``port_bindings``.
+
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
@@ -528,7 +531,8 @@ class ContainerApiMixin(object):
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
port_bindings (dict): See :py:meth:`create_container`
- for more information.
+ for more information.
+ Incompatible with ``host`` in ``network_mode``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@@ -575,7 +579,7 @@ class ContainerApiMixin(object):
Example:
- >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ >>> client.api.create_host_config(privileged=True, cap_drop=['MKNOD'],
volumes_from=['nostalgic_newton'])
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
@@ -606,11 +610,11 @@ class ContainerApiMixin(object):
Example:
- >>> docker_client.create_network('network1')
- >>> networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config()
+ >>> client.api.create_network('network1')
+ >>> networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config()
})
- >>> container = docker_client.create_container(
+ >>> container = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -636,13 +640,15 @@ class ContainerApiMixin(object):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
Returns:
(dict) An endpoint config.
Example:
- >>> endpoint_config = client.create_endpoint_config(
+ >>> endpoint_config = client.api.create_endpoint_config(
aliases=['web', 'app'],
links={'app_db': 'db', 'another': None},
ipv4_address='132.65.0.123'
@@ -694,7 +700,8 @@ class ContainerApiMixin(object):
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container')
- def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
@@ -705,6 +712,8 @@ class ContainerApiMixin(object):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -718,7 +727,7 @@ class ContainerApiMixin(object):
>>> c = docker.APIClient()
>>> f = open('./sh_bin.tar', 'wb')
- >>> bits, stat = c.get_archive(container, '/bin/sh')
+ >>> bits, stat = c.api.get_archive(container, '/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
@@ -729,8 +738,13 @@ class ContainerApiMixin(object):
params = {
'path': path
}
+ headers = {
+ "Accept-Encoding": "gzip, deflate"
+ } if encode_stream else {
+ "Accept-Encoding": "identity"
+ }
url = self._url('/containers/{0}/archive', container)
- res = self._get(url, params=params, stream=True)
+ res = self._get(url, params=params, stream=True, headers=headers)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
@@ -774,7 +788,7 @@ class ContainerApiMixin(object):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
- if not isinstance(signal, six.string_types):
+ if not isinstance(signal, str):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
@@ -900,7 +914,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- >>> cli.port('7174d6347063', 80)
+ >>> client.api.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
"""
res = self._get(self._url("/containers/{0}/json", container))
@@ -1079,10 +1093,10 @@ class ContainerApiMixin(object):
Example:
- >>> container = cli.create_container(
+ >>> container = client.api.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
- >>> cli.start(container=container.get('Id'))
+ >>> client.api.start(container=container.get('Id'))
"""
if args or kwargs:
raise errors.DeprecatedMethod(
@@ -1120,7 +1134,7 @@ class ContainerApiMixin(object):
else:
if decode:
raise errors.InvalidArgument(
- "decode is only available in conjuction with stream=True"
+ "decode is only available in conjunction with stream=True"
)
return self._result(self._get(url, params={'stream': False}),
json=True)
@@ -1206,8 +1220,8 @@ class ContainerApiMixin(object):
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
- mem_limit (int or str): Memory limit
- mem_reservation (int or str): Memory soft limit
+ mem_limit (float or str): Memory limit
+ mem_reservation (float or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
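Among the `docker/api/container.py` changes above, `get_archive` gains an `encode_stream` flag that switches the request's `Accept-Encoding` from `identity` to `gzip, deflate`. A runnable sketch, assuming `busybox:latest` is available locally:

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
container = client.create_container('busybox:latest', 'true')

# encode_stream=True asks the daemon to compress the tar stream in
# transit; the default stays identity (uncompressed).
bits, stat = client.get_archive(container, '/bin/sh', encode_stream=True)
with open('./sh_bin.tar', 'wb') as f:
    for chunk in bits:
        f.write(chunk)
print(stat)
```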
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index f715a13..a857213 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils
-class DaemonApiMixin(object):
+class DaemonApiMixin:
@utils.minimum_version('1.25')
def df(self):
"""
@@ -109,7 +109,7 @@ class DaemonApiMixin(object):
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
- otherwise``$HOME/.dockercfg``)
+ otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 4c49ac3..496308a 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -1,10 +1,8 @@
-import six
-
from .. import errors
from .. import utils
-class ExecApiMixin(object):
+class ExecApiMixin:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
@@ -45,7 +43,7 @@ class ExecApiMixin(object):
'Setting environment for exec is not supported in API < 1.25'
)
- if isinstance(cmd, six.string_types):
+ if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
diff --git a/docker/api/image.py b/docker/api/image.py
index 11c8cf7..772d889 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,15 +1,13 @@
import logging
import os
-import six
-
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
-class ImageApiMixin(object):
+class ImageApiMixin:
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@@ -31,7 +29,7 @@ class ImageApiMixin(object):
Example:
- >>> image = cli.get_image("busybox:latest")
+ >>> image = client.api.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
@@ -81,10 +79,18 @@ class ImageApiMixin(object):
If the server returns an error.
"""
params = {
- 'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
+ if name:
+ if utils.version_lt(self._version, '1.25'):
+ # only use "filter" on API 1.24 and under, as it is deprecated
+ params['filter'] = name
+ else:
+ if filters:
+ filters['reference'] = name
+ else:
+ filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
@@ -122,7 +128,7 @@ class ImageApiMixin(object):
params = _import_image_params(
repository, tag, image,
- src=(src if isinstance(src, six.string_types) else None),
+ src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
@@ -131,7 +137,7 @@ class ImageApiMixin(object):
return self._result(
self._post(u, data=None, params=params)
)
- elif isinstance(src, six.string_types): # from file path
+ elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
@@ -343,13 +349,14 @@ class ImageApiMixin(object):
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
- decode=False, platform=None):
+ decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
- tag (str): The tag to pull
+ tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
+ is set to ``latest``.
stream (bool): Stream the output as a generator. Make sure to
consume the generator, otherwise pull might get cancelled.
auth_config (dict): Override the credentials that are found in the
@@ -358,6 +365,8 @@ class ImageApiMixin(object):
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
+ all_tags (bool): Pull all image tags, the ``tag`` parameter is
+ ignored.
Returns:
(generator or str): The output
@@ -368,7 +377,7 @@ class ImageApiMixin(object):
Example:
- >>> for line in cli.pull('busybox', stream=True, decode=True):
+ >>> for line in client.api.pull('busybox', stream=True, decode=True):
... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
@@ -382,8 +391,12 @@ class ImageApiMixin(object):
}
"""
- if not tag:
- repository, tag = utils.parse_repository_tag(repository)
+ repository, image_tag = utils.parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
+
+ if all_tags:
+ tag = None
+
registry, repo_name = auth.resolve_repository_name(repository)
params = {
@@ -443,7 +456,7 @@ class ImageApiMixin(object):
If the server returns an error.
Example:
- >>> for line in cli.push('yourname/app', stream=True, decode=True):
+ >>> for line in client.api.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
@@ -494,13 +507,14 @@ class ImageApiMixin(object):
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
- def search(self, term):
+ def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
+ limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
@@ -509,8 +523,12 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ params = {'term': term}
+ if limit is not None:
+ params['limit'] = limit
+
return self._result(
- self._get(self._url("/images/search"), params={'term': term}),
+ self._get(self._url("/images/search"), params=params),
True
)
@@ -534,7 +552,7 @@ class ImageApiMixin(object):
Example:
- >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
@@ -551,7 +569,7 @@ class ImageApiMixin(object):
def is_file(src):
try:
return (
- isinstance(src, six.string_types) and
+ isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
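The `docker/api/image.py` hunks above change `pull` so that an unset tag defaults to `latest` unless `all_tags=True` is passed, and add a `limit` parameter to `search`. A sketch of both; the result keys follow the registry's search response format:

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')

# all_tags=True restores the old behaviour of pulling every tag;
# without it, an unqualified pull now fetches only :latest.
for line in client.pull('busybox', all_tags=True, stream=True, decode=True):
    print(line.get('status'))

# search() now accepts an optional cap on the number of results.
for result in client.search('busybox', limit=5):
    print(result['name'], result['star_count'])
```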
diff --git a/docker/api/network.py b/docker/api/network.py
index 750b91b..e95c5fc 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -4,7 +4,7 @@ from ..utils import version_lt
from .. import utils
-class NetworkApiMixin(object):
+class NetworkApiMixin:
def networks(self, names=None, ids=None, filters=None):
"""
List networks. Similar to the ``docker network ls`` command.
@@ -75,7 +75,7 @@ class NetworkApiMixin(object):
Example:
A network using the bridge driver:
- >>> client.create_network("network1", driver="bridge")
+ >>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
@@ -90,7 +90,7 @@ class NetworkApiMixin(object):
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
- >>> docker_client.create_network("network1", driver="bridge",
+ >>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
@@ -216,7 +216,7 @@ class NetworkApiMixin(object):
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
- link_local_ips=None):
+ link_local_ips=None, driver_opt=None):
"""
Connect a container to a network.
@@ -240,7 +240,8 @@ class NetworkApiMixin(object):
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
- ipv6_address=ipv6_address, link_local_ips=link_local_ips
+ ipv6_address=ipv6_address, link_local_ips=link_local_ips,
+ driver_opt=driver_opt
),
}
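The `connect_container_to_network` change above threads a new `driver_opt` dict through to the endpoint config. A sketch; the option key is purely illustrative, since valid keys depend on the network driver in use:

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')

network = client.create_network('network1', driver='bridge')
container = client.create_container('busybox:latest', 'top')

client.connect_container_to_network(
    container,
    network['Id'],
    driver_opt={'com.example.endpoint.opt': 'value'},  # hypothetical key
)
```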
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
index f6c0b13..57110f1 100644
--- a/docker/api/plugin.py
+++ b/docker/api/plugin.py
@@ -1,9 +1,7 @@
-import six
-
from .. import auth, utils
-class PluginApiMixin(object):
+class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
@@ -21,7 +19,7 @@ class PluginApiMixin(object):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
- data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
diff --git a/docker/api/secret.py b/docker/api/secret.py
index e57952b..cd440b9 100644
--- a/docker/api/secret.py
+++ b/docker/api/secret.py
@@ -1,12 +1,10 @@
import base64
-import six
-
from .. import errors
from .. import utils
-class SecretApiMixin(object):
+class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
@@ -25,8 +23,7 @@ class SecretApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
diff --git a/docker/api/service.py b/docker/api/service.py
index e9027bf..371f541 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -45,7 +45,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
- raise_version_error('force_update', '1.25')
+ raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
@@ -113,7 +113,7 @@ def _merge_task_template(current, override):
return merged
-class ServiceApiMixin(object):
+class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 897f08e..db40fdd 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,5 +1,5 @@
import logging
-from six.moves import http_client
+import http.client as http_client
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
@@ -8,7 +8,7 @@ from .. import utils
log = logging.getLogger(__name__)
-class SwarmApiMixin(object):
+class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
@@ -58,10 +58,10 @@ class SwarmApiMixin(object):
Example:
- >>> spec = client.create_swarm_spec(
+ >>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
- >>> client.init_swarm(
+ >>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
@@ -354,8 +354,8 @@ class SwarmApiMixin(object):
Example:
- >>> key = client.get_unlock_key()
- >>> client.unlock_node(key)
+ >>> key = client.api.get_unlock_key()
+ >>> client.unlock_swarm(key)
"""
if isinstance(key, dict):
@@ -396,7 +396,7 @@ class SwarmApiMixin(object):
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
- >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""
diff --git a/docker/api/volume.py b/docker/api/volume.py
index 900a608..86b0018 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -2,7 +2,7 @@ from .. import errors
from .. import utils
-class VolumeApiMixin(object):
+class VolumeApiMixin:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
@@ -21,7 +21,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.volumes()
+ >>> client.api.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
@@ -56,7 +56,7 @@ class VolumeApiMixin(object):
Example:
- >>> volume = cli.create_volume(name='foobar', driver='local',
+ >>> volume = client.api.create_volume(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
>>> print(volume)
@@ -104,7 +104,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.inspect_volume('foobar')
+ >>> client.api.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
diff --git a/docker/auth.py b/docker/auth.py
index 6a07ea2..4fa798f 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -2,14 +2,12 @@ import base64
import json
import logging
-import six
-
from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -18,13 +16,13 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
- 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
- 'Invalid index name ({0}). Cannot begin or end with a'
+ 'Invalid index name ({}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
@@ -98,10 +96,10 @@ class AuthConfig(dict):
"""
conf = {}
- for registry, entry in six.iteritems(entries):
+ for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug(
- 'Config entry for key {0} is not auth config'.format(
+ 'Config entry for key {} is not auth config'.format(
registry
)
)
@@ -111,14 +109,14 @@ class AuthConfig(dict):
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
- 'Invalid configuration for registry {0}'.format(
+ 'Invalid configuration for registry {}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
- 'Found an IdentityToken entry for registry {0}'.format(
+ 'Found an IdentityToken entry for registry {}'.format(
registry
)
)
@@ -132,7 +130,7 @@ class AuthConfig(dict):
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
- 'Auth data for {0} is absent. Client might be using a '
+ 'Auth data for {} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
@@ -140,7 +138,7 @@ class AuthConfig(dict):
username, password = decode_auth(entry['auth'])
log.debug(
- 'Found entry (registry={0}, username={1})'
+ 'Found entry (registry={}, username={})'
.format(repr(registry), repr(username))
)
@@ -170,7 +168,7 @@ class AuthConfig(dict):
try:
with open(config_file) as f:
config_dict = json.load(f)
- except (IOError, KeyError, ValueError) as e:
+ except (OSError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
@@ -230,7 +228,7 @@ class AuthConfig(dict):
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug(
- 'Using credentials store "{0}"'.format(store_name)
+ f'Using credentials store "{store_name}"'
)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
@@ -239,15 +237,15 @@ class AuthConfig(dict):
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ log.debug(f"Looking for auth entry for {repr(registry)}")
if registry in self.auths:
- log.debug("Found {0}".format(repr(registry)))
+ log.debug(f"Found {repr(registry)}")
return self.auths[registry]
- for key, conf in six.iteritems(self.auths):
+ for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
- log.debug("Found {0}".format(repr(key)))
+ log.debug(f"Found {repr(key)}")
return conf
log.debug("No entry found")
@@ -258,7 +256,7 @@ class AuthConfig(dict):
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ log.debug(f"Looking for auth entry for {repr(registry)}")
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
@@ -278,7 +276,7 @@ class AuthConfig(dict):
return None
except credentials.StoreError as e:
raise errors.DockerException(
- 'Credentials store error: {0}'.format(repr(e))
+ f'Credentials store error: {repr(e)}'
)
def _get_store_instance(self, name):
@@ -329,7 +327,7 @@ def convert_to_hostname(url):
def decode_auth(auth):
- if isinstance(auth, six.string_types):
+ if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
diff --git a/docker/client.py b/docker/client.py
index 99ae196..4dbd846 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,5 +1,5 @@
from .api.client import APIClient
-from .constants import DEFAULT_TIMEOUT_SECONDS
+from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
@@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
-class DockerClient(object):
+class DockerClient:
"""
A client for communicating with a Docker server.
@@ -35,6 +35,11 @@ class DockerClient(object):
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@@ -62,14 +67,19 @@ class DockerClient(object):
Args:
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.35``
+ automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is
+ made via shelling out to the ssh client. Ensure the ssh
+ client is installed and configured on the host.
Example:
@@ -80,9 +90,15 @@ class DockerClient(object):
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
+ max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
+ use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
- timeout=timeout, version=version, **kwargs_from_env(**kwargs)
+ timeout=timeout,
+ max_pool_size=max_pool_size,
+ version=version,
+ use_ssh_client=use_ssh_client,
+ **kwargs_from_env(**kwargs)
)
# Resources
@@ -196,7 +212,7 @@ class DockerClient(object):
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
- s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ s = [f"'DockerClient' object has no attribute '{name}'"]
# If a user calls a method on APIClient, they
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
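The two new keyword arguments flow from `DockerClient` straight through to `APIClient`. A minimal usage sketch (the SSH URL is a hypothetical example):

    import docker

    # Read connection settings from the environment, shell out to the
    # local ssh binary, and cap each connection pool at 10 connections.
    client = docker.from_env(use_ssh_client=True, max_pool_size=10)

    # Equivalent explicit construction against a hypothetical SSH host:
    client = docker.DockerClient(
        base_url='ssh://user@example.com',
        use_ssh_client=True,
        max_pool_size=10,
    )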
diff --git a/docker/constants.py b/docker/constants.py
index 4b96e1c..d5bfc35 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.35'
+DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@@ -9,6 +9,18 @@ CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
@@ -16,7 +28,7 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
-DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
+DEFAULT_USER_AGENT = f"docker-sdk-python/{version}"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
@@ -24,6 +36,8 @@ DEFAULT_NUM_POOLS = 25
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
+DEFAULT_MAX_POOL_SIZE = 10
+
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
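Moving `BYTE_UNITS` into `constants` gives size parsing a single source of truth; a rough sketch of the conversion it backs, assuming a one-letter lowercase suffix (`docker.utils.parse_bytes` is the real entry point):

    from docker.constants import BYTE_UNITS

    def to_bytes(spec):
        # '512m' -> 512 * 1024 * 1024
        digits, unit = spec[:-1], spec[-1].lower()
        return int(digits) * BYTE_UNITS[unit]

    assert to_bytes('512m') == 536870912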
diff --git a/docker/context/__init__.py b/docker/context/__init__.py
new file mode 100644
index 0000000..0a6707f
--- /dev/null
+++ b/docker/context/__init__.py
@@ -0,0 +1,3 @@
+# flake8: noqa
+from .context import Context
+from .api import ContextAPI
diff --git a/docker/context/api.py b/docker/context/api.py
new file mode 100644
index 0000000..380e8c4
--- /dev/null
+++ b/docker/context/api.py
@@ -0,0 +1,203 @@
+import json
+import os
+
+from docker import errors
+from docker.context.config import get_meta_dir
+from docker.context.config import METAFILE
+from docker.context.config import get_current_context_name
+from docker.context.config import write_context_name_to_docker_config
+from docker.context import Context
+
+
+class ContextAPI:
+ """Context API.
+ Contains methods for context management:
+ create, list, remove, get, inspect.
+ """
+ DEFAULT_CONTEXT = Context("default", "swarm")
+
+ @classmethod
+ def create_context(
+ cls, name, orchestrator=None, host=None, tls_cfg=None,
+ default_namespace=None, skip_tls_verify=False):
+ """Creates a new context.
+ Returns:
+ (Context): a Context object.
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextAlreadyExists`
+ If a context with the name already exists.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.create_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ '"default" is a reserved context name')
+ ctx = Context.load_context(name)
+ if ctx:
+ raise errors.ContextAlreadyExists(name)
+ endpoint = "docker"
+ if orchestrator and orchestrator != "swarm":
+ endpoint = orchestrator
+ ctx = Context(name, orchestrator)
+ ctx.set_endpoint(
+ endpoint, host, tls_cfg,
+ skip_tls_verify=skip_tls_verify,
+ def_namespace=default_namespace)
+ ctx.save()
+ return ctx
+
+ @classmethod
+ def get_context(cls, name=None):
+ """Retrieves a context object.
+ Args:
+ name (str): The name of the context
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.get_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ name = get_current_context_name()
+ if name == "default":
+ return cls.DEFAULT_CONTEXT
+ return Context.load_context(name)
+
+ @classmethod
+ def contexts(cls):
+ """Context list.
+ Returns:
+ (Context): List of context objects.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ names = []
+ for dirname, dirnames, fnames in os.walk(get_meta_dir()):
+ for filename in fnames + dirnames:
+ if filename == METAFILE:
+ try:
+                        with open(os.path.join(dirname, filename)) as f:
+                            data = json.load(f)
+ names.append(data["Name"])
+ except Exception as e:
+ raise errors.ContextException(
+ "Failed to load metafile {}: {}".format(
+ filename, e))
+
+ contexts = [cls.DEFAULT_CONTEXT]
+ for name in names:
+ contexts.append(Context.load_context(name))
+ return contexts
+
+ @classmethod
+ def get_current_context(cls):
+ """Get current context.
+ Returns:
+ (Context): current context object.
+ """
+ return cls.get_context()
+
+ @classmethod
+ def set_current_context(cls, name="default"):
+ ctx = cls.get_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ err = write_context_name_to_docker_config(name)
+ if err:
+ raise errors.ContextException(
+ f'Failed to set current context: {err}')
+
+ @classmethod
+ def remove_context(cls, name):
+ """Remove a context. Similar to the ``docker context rm`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ContextAPI.remove_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ 'context "default" cannot be removed')
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+ if name == get_current_context_name():
+ write_context_name_to_docker_config(None)
+ ctx.remove()
+
+ @classmethod
+ def inspect_context(cls, name="default"):
+        """Inspect a context. Similar to the ``docker context inspect`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+        >>> ContextAPI.inspect_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ return cls.DEFAULT_CONTEXT()
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ return ctx()
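Taken together, the classmethods above cover the whole context lifecycle. A short sketch against a hypothetical TCP endpoint:

    from docker.context import ContextAPI

    ctx = ContextAPI.create_context('remote', host='tcp://1.2.3.4:2375')
    ContextAPI.set_current_context('remote')
    print([c.Name for c in ContextAPI.contexts()])  # includes 'default'
    ContextAPI.set_current_context('default')
    ContextAPI.remove_context('remote')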
diff --git a/docker/context/config.py b/docker/context/config.py
new file mode 100644
index 0000000..d761aef
--- /dev/null
+++ b/docker/context/config.py
@@ -0,0 +1,81 @@
+import os
+import json
+import hashlib
+
+from docker import utils
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.constants import DEFAULT_UNIX_SOCKET
+from docker.utils.config import find_config_file
+
+METAFILE = "meta.json"
+
+
+def get_current_context_name():
+ name = "default"
+ docker_cfg_path = find_config_file()
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ name = json.load(f).get("currentContext", "default")
+ except Exception:
+ return "default"
+ return name
+
+
+def write_context_name_to_docker_config(name=None):
+ if name == 'default':
+ name = None
+ docker_cfg_path = find_config_file()
+ config = {}
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ config = json.load(f)
+ except Exception as e:
+ return e
+ current_context = config.get("currentContext", None)
+ if current_context and not name:
+ del config["currentContext"]
+ elif name:
+ config["currentContext"] = name
+ else:
+ return
+ try:
+ with open(docker_cfg_path, "w") as f:
+ json.dump(config, f, indent=4)
+ except Exception as e:
+ return e
+
+
+def get_context_id(name):
+ return hashlib.sha256(name.encode('utf-8')).hexdigest()
+
+
+def get_context_dir():
+ return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
+
+
+def get_meta_dir(name=None):
+ meta_dir = os.path.join(get_context_dir(), "meta")
+ if name:
+ return os.path.join(meta_dir, get_context_id(name))
+ return meta_dir
+
+
+def get_meta_file(name):
+ return os.path.join(get_meta_dir(name), METAFILE)
+
+
+def get_tls_dir(name=None, endpoint=""):
+ context_dir = get_context_dir()
+ if name:
+ return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
+ return os.path.join(context_dir, "tls")
+
+
+def get_context_host(path=None, tls=False):
+ host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
+ if host == DEFAULT_UNIX_SOCKET:
+ # remove http+ from default docker socket url
+        return host[len('http+'):]
+ return host
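The on-disk layout is keyed by the SHA-256 of the context name, so each context gets a stable, collision-free directory. The same id can be computed by hand:

    import hashlib

    context_id = hashlib.sha256('remote'.encode('utf-8')).hexdigest()
    # metadata lands in <config-dir>/contexts/meta/<context_id>/meta.json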
diff --git a/docker/context/context.py b/docker/context/context.py
new file mode 100644
index 0000000..dbaa01c
--- /dev/null
+++ b/docker/context/context.py
@@ -0,0 +1,243 @@
+import os
+import json
+from shutil import copyfile, rmtree
+from docker.tls import TLSConfig
+from docker.errors import ContextException
+from docker.context.config import get_meta_dir
+from docker.context.config import get_meta_file
+from docker.context.config import get_tls_dir
+from docker.context.config import get_context_host
+
+
+class Context:
+ """A context."""
+
+ def __init__(self, name, orchestrator=None, host=None, endpoints=None,
+ tls=False):
+ if not name:
+ raise Exception("Name not provided")
+ self.name = name
+ self.context_type = None
+ self.orchestrator = orchestrator
+ self.endpoints = {}
+ self.tls_cfg = {}
+ self.meta_path = "IN MEMORY"
+ self.tls_path = "IN MEMORY"
+
+ if not endpoints:
+ # set default docker endpoint if no endpoint is set
+ default_endpoint = "docker" if (
+ not orchestrator or orchestrator == "swarm"
+ ) else orchestrator
+
+ self.endpoints = {
+ default_endpoint: {
+ "Host": get_context_host(host, tls),
+ "SkipTLSVerify": not tls
+ }
+ }
+ return
+
+ # check docker endpoints
+ for k, v in endpoints.items():
+ if not isinstance(v, dict):
+ # unknown format
+                raise ContextException(
+                    f"Unknown endpoint format for context {name}: {v}")
+
+ self.endpoints[k] = v
+ if k != "docker":
+ continue
+
+ self.endpoints[k]["Host"] = v.get("Host", get_context_host(
+ host, tls))
+ self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
+ "SkipTLSVerify", not tls))
+
+ def set_endpoint(
+ self, name="docker", host=None, tls_cfg=None,
+ skip_tls_verify=False, def_namespace=None):
+ self.endpoints[name] = {
+ "Host": get_context_host(host, not skip_tls_verify),
+ "SkipTLSVerify": skip_tls_verify
+ }
+ if def_namespace:
+ self.endpoints[name]["DefaultNamespace"] = def_namespace
+
+ if tls_cfg:
+ self.tls_cfg[name] = tls_cfg
+
+ def inspect(self):
+ return self.__call__()
+
+ @classmethod
+ def load_context(cls, name):
+ meta = Context._load_meta(name)
+ if meta:
+ instance = cls(
+ meta["Name"],
+ orchestrator=meta["Metadata"].get("StackOrchestrator", None),
+ endpoints=meta.get("Endpoints", None))
+ instance.context_type = meta["Metadata"].get("Type", None)
+ instance._load_certs()
+ instance.meta_path = get_meta_dir(name)
+ return instance
+ return None
+
+ @classmethod
+ def _load_meta(cls, name):
+ meta_file = get_meta_file(name)
+ if not os.path.isfile(meta_file):
+ return None
+
+ metadata = {}
+ try:
+ with open(meta_file) as f:
+ metadata = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # unknown format
+            raise Exception(
+                f"Detected corrupted meta file for context {name}: {e}")
+
+ # for docker endpoints, set defaults for
+ # Host and SkipTLSVerify fields
+ for k, v in metadata["Endpoints"].items():
+ if k != "docker":
+ continue
+ metadata["Endpoints"][k]["Host"] = v.get(
+ "Host", get_context_host(None, False))
+ metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
+ v.get("SkipTLSVerify", True))
+
+ return metadata
+
+ def _load_certs(self):
+ certs = {}
+ tls_dir = get_tls_dir(self.name)
+ for endpoint in self.endpoints.keys():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ continue
+ ca_cert = None
+ cert = None
+ key = None
+ for filename in os.listdir(os.path.join(tls_dir, endpoint)):
+ if filename.startswith("ca"):
+ ca_cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("cert"):
+ cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("key"):
+ key = os.path.join(tls_dir, endpoint, filename)
+ if all([ca_cert, cert, key]):
+ verify = None
+ if endpoint == "docker" and not self.endpoints["docker"].get(
+ "SkipTLSVerify", False):
+ verify = True
+ certs[endpoint] = TLSConfig(
+ client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
+ self.tls_cfg = certs
+ self.tls_path = tls_dir
+
+ def save(self):
+ meta_dir = get_meta_dir(self.name)
+ if not os.path.isdir(meta_dir):
+ os.makedirs(meta_dir)
+ with open(get_meta_file(self.name), "w") as f:
+ f.write(json.dumps(self.Metadata))
+
+ tls_dir = get_tls_dir(self.name)
+ for endpoint, tls in self.tls_cfg.items():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ os.makedirs(os.path.join(tls_dir, endpoint))
+
+ ca_file = tls.ca_cert
+ if ca_file:
+ copyfile(ca_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(ca_file)))
+
+ if tls.cert:
+ cert_file, key_file = tls.cert
+ copyfile(cert_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(cert_file)))
+ copyfile(key_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(key_file)))
+
+ self.meta_path = get_meta_dir(self.name)
+ self.tls_path = get_tls_dir(self.name)
+
+ def remove(self):
+ if os.path.isdir(self.meta_path):
+ rmtree(self.meta_path)
+ if os.path.isdir(self.tls_path):
+ rmtree(self.tls_path)
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ def __str__(self):
+ return json.dumps(self.__call__(), indent=2)
+
+ def __call__(self):
+ result = self.Metadata
+ result.update(self.TLSMaterial)
+ result.update(self.Storage)
+ return result
+
+ def is_docker_host(self):
+ return self.context_type is None
+
+ @property
+ def Name(self):
+ return self.name
+
+ @property
+ def Host(self):
+ if not self.orchestrator or self.orchestrator == "swarm":
+ endpoint = self.endpoints.get("docker", None)
+ if endpoint:
+ return endpoint.get("Host", None)
+ return None
+
+ return self.endpoints[self.orchestrator].get("Host", None)
+
+ @property
+ def Orchestrator(self):
+ return self.orchestrator
+
+ @property
+ def Metadata(self):
+ meta = {}
+ if self.orchestrator:
+ meta = {"StackOrchestrator": self.orchestrator}
+ return {
+ "Name": self.name,
+ "Metadata": meta,
+ "Endpoints": self.endpoints
+ }
+
+ @property
+ def TLSConfig(self):
+ key = self.orchestrator
+ if not key or key == "swarm":
+ key = "docker"
+ if key in self.tls_cfg.keys():
+ return self.tls_cfg[key]
+ return None
+
+ @property
+ def TLSMaterial(self):
+ certs = {}
+ for endpoint, tls in self.tls_cfg.items():
+ cert, key = tls.cert
+ certs[endpoint] = list(
+ map(os.path.basename, [tls.ca_cert, cert, key]))
+ return {
+ "TLSMaterial": certs
+ }
+
+ @property
+ def Storage(self):
+ return {
+ "Storage": {
+ "MetadataPath": self.meta_path,
+ "TLSPath": self.tls_path
+ }}
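A `Context` is callable and serializes to the same JSON shape the CLI prints; a minimal round trip (nothing touches disk until `save()` is called):

    from docker.context import Context

    ctx = Context('test', host='unix:///var/run/docker.sock')
    ctx.save()                 # writes meta.json under the context dir
    loaded = Context.load_context('test')
    print(loaded)              # Name, Metadata, Endpoints, Storage, ...
    loaded.remove()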
diff --git a/docker/credentials/store.py b/docker/credentials/store.py
index 0017888..e55976f 100644
--- a/docker/credentials/store.py
+++ b/docker/credentials/store.py
@@ -2,15 +2,13 @@ import errno
import json
import subprocess
-import six
-
from . import constants
from . import errors
from .utils import create_environment_dict
from .utils import find_executable
-class Store(object):
+class Store:
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
@@ -30,7 +28,7 @@ class Store(object):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
- if not isinstance(server, six.binary_type):
+ if not isinstance(server, bytes):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
@@ -41,7 +39,7 @@ class Store(object):
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
- 'No matching credentials in {}'.format(self.program)
+ f'No matching credentials in {self.program}'
)
return result
@@ -61,7 +59,7 @@ class Store(object):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
- if not isinstance(server, six.binary_type):
+ if not isinstance(server, bytes):
server = server.encode('utf-8')
self._execute('erase', server)
@@ -75,20 +73,9 @@ class Store(object):
output = None
env = create_environment_dict(self.environment)
try:
- if six.PY3:
- output = subprocess.check_output(
- [self.exe, subcmd], input=data_input, env=env,
- )
- else:
- process = subprocess.Popen(
- [self.exe, subcmd], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, env=env,
- )
- output, _ = process.communicate(data_input)
- if process.returncode != 0:
- raise subprocess.CalledProcessError(
- returncode=process.returncode, cmd='', output=output
- )
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program)
except OSError as e:
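With the Python 2 branch gone, `_execute` reduces to a single `check_output` call. For reference, the credential-helper protocol it drives looks like this when invoked standalone (the helper name is an example; any `docker-credential-*` binary follows the same stdin/stdout contract):

    import json
    import subprocess

    out = subprocess.check_output(
        ['docker-credential-desktop', 'get'],
        input=b'https://index.docker.io/v1/',
    )
    creds = json.loads(out.decode('utf-8'))  # ServerURL, Username, Secret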
diff --git a/docker/errors.py b/docker/errors.py
index c340dcb..ba95256 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -38,23 +38,25 @@ class APIError(requests.exceptions.HTTPError, DockerException):
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
- super(APIError, self).__init__(message)
+ super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
- message = super(APIError, self).__str__()
+ message = super().__str__()
if self.is_client_error():
- message = '{0} Client Error: {1}'.format(
- self.response.status_code, self.response.reason)
+ message = '{} Client Error for {}: {}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
elif self.is_server_error():
- message = '{0} Server Error: {1}'.format(
- self.response.status_code, self.response.reason)
+ message = '{} Server Error for {}: {}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
if self.explanation:
- message = '{0} ("{1}")'.format(message, self.explanation)
+ message = f'{message} ("{self.explanation}")'
return message
@@ -131,11 +133,11 @@ class ContainerError(DockerException):
self.image = image
self.stderr = stderr
- err = ": {}".format(stderr) if stderr is not None else ""
+ err = f": {stderr}" if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
- super(ContainerError, self).__init__(msg)
+ super().__init__(msg)
class StreamParseError(RuntimeError):
@@ -145,7 +147,7 @@ class StreamParseError(RuntimeError):
class BuildError(DockerException):
def __init__(self, reason, build_log):
- super(BuildError, self).__init__(reason)
+ super().__init__(reason)
self.msg = reason
self.build_log = build_log
@@ -155,11 +157,43 @@ class ImageLoadError(DockerException):
def create_unexpected_kwargs_error(name, kwargs):
- quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
- text = ["{}() ".format(name)]
+ quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
+ text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
+
+
+class MissingContextParameter(DockerException):
+ def __init__(self, param):
+ self.param = param
+
+ def __str__(self):
+        return f"missing parameter: {self.param}"
+
+
+class ContextAlreadyExists(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+        return f"context {self.name} already exists"
+
+
+class ContextException(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+        return self.msg
+
+
+class ContextNotFound(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+        return f"context '{self.name}' not found"
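`APIError.__str__` now embeds the request URL between the status code and the reason, and the four new context exceptions are ordinary `DockerException` subclasses, so callers can catch them individually:

    from docker import errors
    from docker.context import ContextAPI

    try:
        ContextAPI.remove_context('does-not-exist')
    except errors.ContextNotFound as e:
        print(e)  # context 'does-not-exist' not found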
diff --git a/docker/models/configs.py b/docker/models/configs.py
index 7f23f65..3588c8b 100644
--- a/docker/models/configs.py
+++ b/docker/models/configs.py
@@ -7,7 +7,7 @@ class Config(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
diff --git a/docker/models/containers.py b/docker/models/containers.py
index d1f275f..957deed 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -225,7 +225,8 @@ class Container(Model):
"""
return self.client.api.export(self.id, chunk_size)
- def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
@@ -235,6 +236,8 @@ class Container(Model):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -255,7 +258,8 @@ class Container(Model):
... f.write(chunk)
>>> f.close()
"""
- return self.client.api.get_archive(self.id, path, chunk_size)
+ return self.client.api.get_archive(self.id, path,
+ chunk_size, encode_stream)
def kill(self, signal=None):
"""
@@ -579,6 +583,9 @@ class ContainerCollection(Collection):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
@@ -642,6 +649,7 @@ class ContainerCollection(Collection):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+ This mode is incompatible with ``ports``.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
@@ -675,6 +683,7 @@ class ContainerCollection(Collection):
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
+ Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@@ -752,6 +761,14 @@ class ContainerCollection(Collection):
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+            Or a list of strings, each of which specifies a volume mount.
+
+ For example:
+
+ .. code-block:: python
+
+ ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
+
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
@@ -998,6 +1015,7 @@ RUN_HOST_CONFIG_KWARGS = [
'device_write_bps',
'device_write_iops',
'devices',
+ 'device_requests',
'dns_opt',
'dns_search',
'dns',
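The `device_requests` passthrough is what exposes GPUs through the high-level API; the usual NVIDIA invocation (the image name assumes an environment with the NVIDIA container runtime installed):

    import docker
    from docker.types import DeviceRequest

    client = docker.from_env()
    logs = client.containers.run(
        'nvidia/cuda:11.0-base',
        'nvidia-smi',
        device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
    )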
diff --git a/docker/models/images.py b/docker/models/images.py
index 757a5a4..46f8efe 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -2,8 +2,6 @@ import itertools
import re
import warnings
-import six
-
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import BuildError, ImageLoadError, InvalidArgument
@@ -17,7 +15,7 @@ class Image(Model):
An image on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+ return "<{}: '{}'>".format(self.__class__.__name__, "', '".join(self.tags))
@property
def labels(self):
@@ -84,19 +82,19 @@ class Image(Model):
Example:
- >>> image = cli.get_image("busybox:latest")
+ >>> image = cli.images.get("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
- >>> for chunk in image:
+ >>> for chunk in image.save():
>>> f.write(chunk)
>>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
- if isinstance(named, six.string_types):
+ if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(
- "{} is not a valid tag for this image".format(named)
+ f"{named} is not a valid tag for this image"
)
img = named
@@ -127,7 +125,7 @@ class RegistryData(Model):
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
- super(RegistryData, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.image_name = image_name
@property
@@ -180,7 +178,7 @@ class RegistryData(Model):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
- '"{0}" is not a valid platform descriptor'.format(platform)
+ f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
@@ -277,7 +275,7 @@ class ImageCollection(Collection):
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
- if isinstance(resp, six.string_types):
+ if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
@@ -395,12 +393,13 @@ class ImageCollection(Collection):
return [self.get(i) for i in images]
- def pull(self, repository, tag=None, **kwargs):
+ def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
- If no tag is specified, all tags from that repository will be
- pulled.
+ If ``tag`` is ``None`` or empty, it is set to ``latest``.
+ If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
+ tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
@@ -413,10 +412,11 @@ class ImageCollection(Collection):
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
+ all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
- If no ``tag`` was specified, the method will return a list
+ If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
@@ -426,13 +426,13 @@ class ImageCollection(Collection):
Example:
>>> # Pull the image tagged `latest` in the busybox repo
- >>> image = client.images.pull('busybox:latest')
+ >>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
- >>> images = client.images.pull('busybox')
+ >>> images = client.images.pull('busybox', all_tags=True)
"""
- if not tag:
- repository, tag = parse_repository_tag(repository)
+ repository, image_tag = parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
if 'stream' in kwargs:
warnings.warn(
@@ -442,14 +442,14 @@ class ImageCollection(Collection):
del kwargs['stream']
pull_log = self.client.api.pull(
- repository, tag=tag, stream=True, **kwargs
+ repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
- if tag:
+ if not all_tags:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))
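The reworked `pull()` makes the single-tag and all-tags paths explicit instead of inferring them from an empty tag:

    import docker

    client = docker.from_env()
    image = client.images.pull('busybox')                   # busybox:latest
    image = client.images.pull('busybox', tag='1.32')       # explicit tag
    images = client.images.pull('busybox', all_tags=True)   # list of Images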
diff --git a/docker/models/networks.py b/docker/models/networks.py
index f944c8e..093deb7 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -46,6 +46,8 @@ class Network(Model):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
index 0688018..37ecefb 100644
--- a/docker/models/plugins.py
+++ b/docker/models/plugins.py
@@ -7,7 +7,7 @@ class Plugin(Model):
A plugin on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -117,9 +117,8 @@ class Plugin(Model):
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
- for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
- yield d
- self._reload()
+ yield from self.client.api.upgrade_plugin(self.name, remote, privileges)
+ self.reload()
class PluginCollection(Collection):
diff --git a/docker/models/resource.py b/docker/models/resource.py
index ed3900a..dec2349 100644
--- a/docker/models/resource.py
+++ b/docker/models/resource.py
@@ -1,5 +1,4 @@
-
-class Model(object):
+class Model:
"""
A base class for representing a single object on the server.
"""
@@ -18,13 +17,13 @@ class Model(object):
self.attrs = {}
def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+ return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
- return hash("%s:%s" % (self.__class__.__name__, self.id))
+ return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
@@ -49,7 +48,7 @@ class Model(object):
self.attrs = new_model.attrs
-class Collection(object):
+class Collection:
"""
A base class for representing all objects of a particular type on the
server.
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
index ca11ede..da01d44 100644
--- a/docker/models/secrets.py
+++ b/docker/models/secrets.py
@@ -7,7 +7,7 @@ class Secret(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -30,6 +30,7 @@ class SecretCollection(Collection):
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
diff --git a/docker/models/services.py b/docker/models/services.py
index a35687b..200dd33 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -157,6 +157,8 @@ class ServiceCollection(Collection):
constraints.
preferences (list of tuple): :py:class:`~docker.types.Placement`
preferences.
+            maxreplicas (int): Maximum number of replicas to run per node,
+                enforced via :py:class:`~docker.types.Placement`.
platforms (list of tuple): A list of platform constraints
expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
@@ -211,6 +213,10 @@ class ServiceCollection(Collection):
to the service.
privileges (Privileges): Security options for the service's
containers.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to
+ the default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop
+ from the default set for the container.
Returns:
:py:class:`Service`: The created service.
@@ -275,6 +281,8 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
+ 'cap_add',
+ 'cap_drop',
'command',
'configs',
'dns_config',
@@ -319,6 +327,7 @@ PLACEMENT_KWARGS = [
'constraints',
'preferences',
'platforms',
+ 'maxreplicas',
]
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index 755c17d..b0b1a2e 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -11,7 +11,7 @@ class Swarm(Model):
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
- super(Swarm, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()
diff --git a/docker/tls.py b/docker/tls.py
index d4671d1..067d556 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -5,7 +5,7 @@ from . import errors
from .transport import SSLHTTPAdapter
-class TLSConfig(object):
+class TLSConfig:
"""
TLS configuration.
@@ -32,7 +32,7 @@ class TLSConfig(object):
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
- # leaving tls_verify=False
+ # leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -62,7 +62,7 @@ class TLSConfig(object):
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
- # "tls" and "tls_verify" must have both or neither cert/key files In
+ # "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
@@ -71,7 +71,7 @@ class TLSConfig(object):
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
- 'client_config must be a tuple of'
+ 'client_cert must be a tuple of'
' (client certificate, key file)'
)
@@ -79,7 +79,7 @@ class TLSConfig(object):
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
- ' through the client_config param'
+ ' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
@@ -88,7 +88,7 @@ class TLSConfig(object):
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
- 'Invalid CA certificate provided for `tls_ca_cert`.'
+ 'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
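The corrected docstrings now name the real parameters (`client_cert`, `ca_cert`); wiring them up looks like this (paths and host are hypothetical):

    import docker
    from docker.tls import TLSConfig

    tls_config = TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),
        ca_cert='/certs/ca.pem',
        verify=True,
    )
    client = docker.DockerClient(base_url='tcp://host:2376', tls=tls_config)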
diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py
index 4d819b6..dfbb193 100644
--- a/docker/transport/basehttpadapter.py
+++ b/docker/transport/basehttpadapter.py
@@ -3,6 +3,6 @@ import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
- super(BaseHTTPAdapter, self).close()
+ super().close()
if hasattr(self, 'pools'):
self.pools.clear()
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index aa05538..df67f21 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -1,14 +1,11 @@
-import six
+import queue
import requests.adapters
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
+import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@@ -18,9 +15,9 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class NpipeHTTPConnection(httplib.HTTPConnection, object):
+class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
- super(NpipeHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
@@ -35,7 +32,7 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
- super(NpipeHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
@@ -57,7 +54,7 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
- except six.moves.queue.Empty:
+ except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
@@ -73,16 +70,19 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
- 'timeout']
+ 'timeout',
+ 'max_pool_size']
def __init__(self, base_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(NpipeHTTPAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -91,7 +91,8 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
return pool
pool = NpipeHTTPConnectionPool(
- self.npipe_path, self.timeout
+ self.npipe_path, self.timeout,
+ maxsize=self.max_pool_size
)
self.pools[url] = pool
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index ef02031..766372a 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -1,7 +1,7 @@
import functools
+import time
import io
-import six
import win32file
import win32pipe
@@ -9,7 +9,7 @@ cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
-RETRY_WAIT_TIMEOUT = 10000
+MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@@ -23,7 +23,7 @@ def check_closed(f):
return wrapped
-class NpipeSocket(object):
+class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
@@ -46,8 +46,7 @@ class NpipeSocket(object):
self._closed = True
@check_closed
- def connect(self, address):
- win32pipe.WaitNamedPipe(address, self._timeout)
+ def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
@@ -65,8 +64,10 @@ class NpipeSocket(object):
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
- win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
- return self.connect(address)
+            retry_count += 1
+            if retry_count < MAXIMUM_RETRY_COUNT:
+ time.sleep(1)
+ return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
@@ -126,9 +127,6 @@ class NpipeSocket(object):
@check_closed
def recv_into(self, buf, nbytes=0):
- if six.PY2:
- return self._recv_into_py2(buf, nbytes)
-
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
@@ -193,7 +191,7 @@ class NpipeFileIOBase(io.RawIOBase):
self.sock = npipe_socket
def close(self):
- super(NpipeFileIOBase, self).close()
+ super().close()
self.sock = None
def fileno(self):
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py
index 5a8ceb0..8e6beb2 100644
--- a/docker/transport/sshconn.py
+++ b/docker/transport/sshconn.py
@@ -1,14 +1,17 @@
import paramiko
+import queue
+import urllib.parse
import requests.adapters
-import six
+import logging
+import os
+import signal
+import socket
+import subprocess
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
+import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@@ -18,33 +21,122 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class SSHConnection(httplib.HTTPConnection, object):
- def __init__(self, ssh_transport, timeout=60):
- super(SSHConnection, self).__init__(
+class SSHSocket(socket.socket):
+ def __init__(self, host):
+ super().__init__(
+ socket.AF_INET, socket.SOCK_STREAM)
+ self.host = host
+ self.port = None
+ self.user = None
+ if ':' in self.host:
+ self.host, self.port = self.host.split(':')
+ if '@' in self.host:
+ self.user, self.host = self.host.split('@')
+
+ self.proc = None
+
+ def connect(self, **kwargs):
+ args = ['ssh']
+ if self.user:
+ args = args + ['-l', self.user]
+
+ if self.port:
+ args = args + ['-p', self.port]
+
+ args = args + ['--', self.host, 'docker system dial-stdio']
+
+ preexec_func = None
+ if not constants.IS_WINDOWS_PLATFORM:
+ def f():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ preexec_func = f
+
+ env = dict(os.environ)
+
+ # drop LD_LIBRARY_PATH and SSL_CERT_FILE
+ env.pop('LD_LIBRARY_PATH', None)
+ env.pop('SSL_CERT_FILE', None)
+
+ self.proc = subprocess.Popen(
+ ' '.join(args),
+ env=env,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=None if constants.IS_WINDOWS_PLATFORM else preexec_func)
+
+ def _write(self, data):
+ if not self.proc or self.proc.stdin.closed:
+            raise Exception('SSH subprocess not initiated.'
+                            ' connect() must be called first.')
+ written = self.proc.stdin.write(data)
+ self.proc.stdin.flush()
+ return written
+
+ def sendall(self, data):
+ self._write(data)
+
+ def send(self, data):
+ return self._write(data)
+
+ def recv(self, n):
+ if not self.proc:
+            raise Exception('SSH subprocess not initiated.'
+                            ' connect() must be called first.')
+ return self.proc.stdout.read(n)
+
+ def makefile(self, mode):
+ if not self.proc:
+ self.connect()
+ self.proc.stdout.channel = self
+
+ return self.proc.stdout
+
+ def close(self):
+ if not self.proc or self.proc.stdin.closed:
+ return
+ self.proc.stdin.write(b'\n\n')
+ self.proc.stdin.flush()
+ self.proc.terminate()
+
+
+class SSHConnection(httplib.HTTPConnection):
+ def __init__(self, ssh_transport=None, timeout=60, host=None):
+ super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
+ self.ssh_host = host
def connect(self):
- sock = self.ssh_transport.open_session()
- sock.settimeout(self.timeout)
- sock.exec_command('docker system dial-stdio')
+ if self.ssh_transport:
+ sock = self.ssh_transport.open_session()
+ sock.settimeout(self.timeout)
+ sock.exec_command('docker system dial-stdio')
+ else:
+ sock = SSHSocket(self.ssh_host)
+ sock.settimeout(self.timeout)
+ sock.connect()
+
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
- def __init__(self, ssh_client, timeout=60, maxsize=10):
- super(SSHConnectionPool, self).__init__(
+ def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
- self.ssh_transport = ssh_client.get_transport()
+ self.ssh_transport = None
self.timeout = timeout
+ if ssh_client:
+ self.ssh_transport = ssh_client.get_transport()
+ self.ssh_host = host
def _new_conn(self):
- return SSHConnection(self.ssh_transport, self.timeout)
+ return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
@@ -57,7 +149,7 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
- except six.moves.queue.Empty:
+ except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
@@ -72,45 +164,92 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
- 'pools', 'timeout', 'ssh_client',
+ 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
- self.ssh_client = paramiko.SSHClient()
- self.ssh_client.load_system_host_keys()
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
+ shell_out=False):
+ self.ssh_client = None
+ if not shell_out:
+ self._create_paramiko_client(base_url)
+ self._connect()
+
+ self.ssh_host = base_url
+ if base_url.startswith('ssh://'):
+ self.ssh_host = base_url[len('ssh://'):]
- self.base_url = base_url
- self._connect()
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(SSHHTTPAdapter, self).__init__()
+ super().__init__()
+
+ def _create_paramiko_client(self, base_url):
+ logging.getLogger("paramiko").setLevel(logging.WARNING)
+ self.ssh_client = paramiko.SSHClient()
+ base_url = urllib.parse.urlparse(base_url)
+ self.ssh_params = {
+ "hostname": base_url.hostname,
+ "port": base_url.port,
+ "username": base_url.username
+ }
+ ssh_config_file = os.path.expanduser("~/.ssh/config")
+ if os.path.exists(ssh_config_file):
+ conf = paramiko.SSHConfig()
+ with open(ssh_config_file) as f:
+ conf.parse(f)
+ host_config = conf.lookup(base_url.hostname)
+ if 'proxycommand' in host_config:
+ self.ssh_params["sock"] = paramiko.ProxyCommand(
+                        host_config['proxycommand']
+ )
+ if 'hostname' in host_config:
+ self.ssh_params['hostname'] = host_config['hostname']
+ if base_url.port is None and 'port' in host_config:
+ self.ssh_params['port'] = host_config['port']
+ if base_url.username is None and 'user' in host_config:
+ self.ssh_params['username'] = host_config['user']
+ if 'identityfile' in host_config:
+ self.ssh_params['key_filename'] = host_config['identityfile']
+
+ self.ssh_client.load_system_host_keys()
+ self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())
def _connect(self):
- parsed = six.moves.urllib_parse.urlparse(self.base_url)
- self.ssh_client.connect(
- parsed.hostname, parsed.port, parsed.username,
- )
+ if self.ssh_client:
+ self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
+ if not self.ssh_client:
+ return SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
# Connection is closed try a reconnect
- if not self.ssh_client.get_transport():
+ if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
- self.ssh_client, self.timeout
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
- super(SSHHTTPAdapter, self).close()
- self.ssh_client.close()
+ super().close()
+ if self.ssh_client:
+ self.ssh_client.close()
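When `shell_out` is true the adapter skips paramiko entirely and tunnels HTTP over the stdio of `ssh ... docker system dial-stdio`; this is the path `use_ssh_client=True` selects on the client. A sketch against a hypothetical host (requires a working `ssh` binary locally and docker on the remote end):

    import docker

    client = docker.DockerClient(
        base_url='ssh://user@example.com', use_ssh_client=True)
    print(client.ping())  # True once the tunnel is up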
diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py
index 12de76c..31e3014 100644
--- a/docker/transport/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -36,7 +36,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- super(SSLHTTPAdapter, self).__init__(**kwargs)
+ super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
@@ -59,7 +59,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
But we still need to take care of when there is a proxy poolmanager
"""
- conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
+ conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index b619103..1b00762 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -1,7 +1,6 @@
-import six
import requests.adapters
import socket
-from six.moves import http_client as httplib
+import http.client as httplib
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
@@ -15,27 +14,15 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class UnixHTTPResponse(httplib.HTTPResponse, object):
- def __init__(self, sock, *args, **kwargs):
- disable_buffering = kwargs.pop('disable_buffering', False)
- if six.PY2:
- # FIXME: We may need to disable buffering on Py3 as well,
- # but there's no clear way to do it at the moment. See:
- # https://github.com/docker/docker-py/issues/1799
- kwargs['buffering'] = not disable_buffering
- super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
-
-
-class UnixHTTPConnection(httplib.HTTPConnection, object):
+class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
- super(UnixHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
- self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -44,20 +31,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.sock = sock
def putheader(self, header, *values):
- super(UnixHTTPConnection, self).putheader(header, *values)
- if header == 'Connection' and 'Upgrade' in values:
- self.disable_buffering = True
+ super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
- if self.disable_buffering:
- kwargs['disable_buffering'] = True
-
- return UnixHTTPResponse(sock, *args, **kwargs)
+ return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
- super(UnixHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
@@ -74,19 +56,22 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
- 'timeout']
+ 'timeout',
+ 'max_pool_size']
def __init__(self, socket_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(UnixHTTPAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -95,7 +80,8 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
return pool
pool = UnixHTTPConnectionPool(
- url, self.socket_path, self.timeout
+ url, self.socket_path, self.timeout,
+ maxsize=self.max_pool_size
)
self.pools[url] = pool
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 5db330e..b425746 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,5 +1,7 @@
# flake8: noqa
-from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .containers import (
+ ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
+)
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
diff --git a/docker/types/base.py b/docker/types/base.py
index 6891062..8851f1e 100644
--- a/docker/types/base.py
+++ b/docker/types/base.py
@@ -1,7 +1,4 @@
-import six
-
-
class DictType(dict):
def __init__(self, init):
- for k, v in six.iteritems(init):
+ for k, v in init.items():
self[k] = v
diff --git a/docker/types/containers.py b/docker/types/containers.py
index fd8cab4..f1b60b2 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
@@ -10,7 +8,7 @@ from .base import DictType
from .healthcheck import Healthcheck
-class LogConfigTypesEnum(object):
+class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
@@ -61,7 +59,7 @@ class LogConfig(DictType):
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
- super(LogConfig, self).__init__({
+ super().__init__({
'Type': log_driver_type,
'Config': config
})
@@ -97,8 +95,8 @@ class Ulimit(DictType):
Args:
- name (str): Which ulimit will this apply to. A list of valid names can
- be found `here <http://tinyurl.me/ZWRkM2Ztwlykf>`_.
+ name (str): Which ulimit will this apply to. The valid names can be
+        found in '/etc/security/limits.conf' on a GNU/Linux system.
soft (int): The soft limit for this ulimit. Optional.
hard (int): The hard limit for this ulimit. Optional.
@@ -117,13 +115,13 @@ class Ulimit(DictType):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
- if not isinstance(name, six.string_types):
+ if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
- super(Ulimit, self).__init__({
+ super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
@@ -154,6 +152,104 @@ class Ulimit(DictType):
self['Hard'] = value
+class DeviceRequest(DictType):
+ """
+ Create a device request to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+ driver (str): Which driver to use for this device. Optional.
+ count (int): Number of devices to request. Optional.
+ Set to -1 to request all available devices.
+ device_ids (list): List of strings for device IDs. Optional.
+ Set either ``count`` or ``device_ids``.
+ capabilities (list): List of lists of strings to request
+ capabilities. Optional. The outer list acts as an OR, while
+ each sub-list acts as an AND; the driver will try to satisfy
+ one of the sub-lists.
+ Available capabilities for the ``nvidia`` driver can be found
+ `here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
+ options (dict): Driver-specific options. Optional.
+ """
+
+ def __init__(self, **kwargs):
+ driver = kwargs.get('driver', kwargs.get('Driver'))
+ count = kwargs.get('count', kwargs.get('Count'))
+ device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
+ capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
+ options = kwargs.get('options', kwargs.get('Options'))
+
+ if driver is None:
+ driver = ''
+ elif not isinstance(driver, str):
+ raise ValueError('DeviceRequest.driver must be a string')
+ if count is None:
+ count = 0
+ elif not isinstance(count, int):
+ raise ValueError('DeviceRequest.count must be an integer')
+ if device_ids is None:
+ device_ids = []
+ elif not isinstance(device_ids, list):
+ raise ValueError('DeviceRequest.device_ids must be a list')
+ if capabilities is None:
+ capabilities = []
+ elif not isinstance(capabilities, list):
+ raise ValueError('DeviceRequest.capabilities must be a list')
+ if options is None:
+ options = {}
+ elif not isinstance(options, dict):
+ raise ValueError('DeviceRequest.options must be a dict')
+
+ super().__init__({
+ 'Driver': driver,
+ 'Count': count,
+ 'DeviceIDs': device_ids,
+ 'Capabilities': capabilities,
+ 'Options': options
+ })
+
+ @property
+ def driver(self):
+ return self['Driver']
+
+ @driver.setter
+ def driver(self, value):
+ self['Driver'] = value
+
+ @property
+ def count(self):
+ return self['Count']
+
+ @count.setter
+ def count(self, value):
+ self['Count'] = value
+
+ @property
+ def device_ids(self):
+ return self['DeviceIDs']
+
+ @device_ids.setter
+ def device_ids(self, value):
+ self['DeviceIDs'] = value
+
+ @property
+ def capabilities(self):
+ return self['Capabilities']
+
+ @capabilities.setter
+ def capabilities(self, value):
+ self['Capabilities'] = value
+
+ @property
+ def options(self):
+ return self['Options']
+
+ @options.setter
+ def options(self, value):
+ self['Options'] = value
+
+
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
@@ -176,7 +272,7 @@ class HostConfig(dict):
volume_driver=None, cpu_count=None, cpu_percent=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
- device_cgroup_rules=None):
+ device_cgroup_rules=None, device_requests=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@@ -199,7 +295,7 @@ class HostConfig(dict):
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
- if isinstance(shm_size, six.string_types):
+ if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
@@ -236,10 +332,11 @@ class HostConfig(dict):
if dns_search:
self['DnsSearch'] = dns_search
- if network_mode:
- self['NetworkMode'] = network_mode
- elif network_mode is None:
- self['NetworkMode'] = 'default'
+ if network_mode == 'host' and port_bindings:
+ raise host_config_incompatible_error(
+ 'network_mode', 'host', 'port_bindings'
+ )
+ self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
@@ -259,7 +356,7 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices)
if group_add:
- self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+ self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
@@ -279,11 +376,11 @@ class HostConfig(dict):
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- self['Sysctls'][k] = six.text_type(v)
+ for k, v in sysctls.items():
+ self['Sysctls'][k] = str(v)
if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
+ if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
@@ -305,7 +402,7 @@ class HostConfig(dict):
if isinstance(lxc_conf, dict):
formatted = []
- for k, v in six.iteritems(lxc_conf):
+ for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
@@ -460,7 +557,7 @@ class HostConfig(dict):
self["PidsLimit"] = pids_limit
if isolation:
- if not isinstance(isolation, six.string_types):
+ if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
@@ -510,7 +607,7 @@ class HostConfig(dict):
self['CpuPercent'] = cpu_percent
if nano_cpus:
- if not isinstance(nano_cpus, six.integer_types):
+ if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
@@ -536,6 +633,19 @@ class HostConfig(dict):
)
self['DeviceCgroupRules'] = device_cgroup_rules
+ if device_requests is not None:
+ if version_lt(version, '1.40'):
+ raise host_config_version_error('device_requests', '1.40')
+ if not isinstance(device_requests, list):
+ raise host_config_type_error(
+ 'device_requests', device_requests, 'list'
+ )
+ self['DeviceRequests'] = []
+ for req in device_requests:
+ if not isinstance(req, DeviceRequest):
+ req = DeviceRequest(**req)
+ self['DeviceRequests'].append(req)
+
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
@@ -553,6 +663,13 @@ def host_config_value_error(param, param_value):
return ValueError(error_msg.format(param, param_value))
+def host_config_incompatible_error(param, param_value, incompatible_param):
+ error_msg = '\"{1}\" {0} is incompatible with {2}'
+ return errors.InvalidArgument(
+ error_msg.format(param, param_value, incompatible_param)
+ )
+
+
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
@@ -580,17 +697,17 @@ class ContainerConfig(dict):
'version 1.29'
)
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
- if isinstance(entrypoint, six.string_types):
+ if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
+ labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
@@ -601,10 +718,10 @@ class ContainerConfig(dict):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
@@ -633,7 +750,7 @@ class ContainerConfig(dict):
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
- 'User': six.text_type(user) if user is not None else None,
+ 'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
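
Two behavioral notes fall out of the containers.py hunk: network_mode='host' combined with port_bindings now raises errors.InvalidArgument instead of being sent to the daemon, and the new DeviceRequest type plugs into host configs via device_requests (version-gated to API >= 1.40 above). A usage sketch, assuming a daemon with the NVIDIA runtime available:

    import docker

    client = docker.from_env()
    # count=-1 requests all available devices; capabilities is a list of
    # AND-ed sub-lists, one of which the driver must satisfy.
    out = client.containers.run(
        'nvidia/cuda:11.0-base', 'nvidia-smi', remove=True,
        device_requests=[
            docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])
        ],
    )
    print(out.decode('utf-8'))
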
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
index af3e5bc..10e8101 100644
--- a/docker/types/daemon.py
+++ b/docker/types/daemon.py
@@ -8,7 +8,7 @@ except ImportError:
from ..errors import DockerException
-class CancellableStream(object):
+class CancellableStream:
"""
Stream wrapper for real-time events, logs, etc. from the server.
@@ -32,7 +32,7 @@ class CancellableStream(object):
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration
- except socket.error:
+ except OSError:
raise StopIteration
next = __next__
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
index 9815018..dfc88a9 100644
--- a/docker/types/healthcheck.py
+++ b/docker/types/healthcheck.py
@@ -1,7 +1,5 @@
from .base import DictType
-import six
-
class Healthcheck(DictType):
"""
@@ -31,7 +29,7 @@ class Healthcheck(DictType):
"""
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
- if isinstance(test, six.string_types):
+ if isinstance(test, str):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
@@ -39,7 +37,7 @@ class Healthcheck(DictType):
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
- super(Healthcheck, self).__init__({
+ super().__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
@@ -53,7 +51,7 @@ class Healthcheck(DictType):
@test.setter
def test(self, value):
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
value = ["CMD-SHELL", value]
self['Test'] = value
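
As the test setter above shows, a plain-string test is normalized to the ['CMD-SHELL', ...] form. A short sketch (durations are nanoseconds in the Engine API):

    from docker.types import Healthcheck

    hc = Healthcheck(
        test='curl -f http://localhost/ || exit 1',  # becomes CMD-SHELL form
        interval=30 * 10**9,  # 30s, expressed in nanoseconds
        timeout=10 * 10**9,
        retries=3,
    )
    print(hc['Test'])  # ['CMD-SHELL', 'curl -f http://localhost/ || exit 1']
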
diff --git a/docker/types/networks.py b/docker/types/networks.py
index 1c7b2c9..1370dc1 100644
--- a/docker/types/networks.py
+++ b/docker/types/networks.py
@@ -4,7 +4,7 @@ from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
- ipv6_address=None, link_local_ips=None):
+ ipv6_address=None, link_local_ips=None, driver_opt=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
@@ -33,6 +33,15 @@ class EndpointConfig(dict):
if ipam_config:
self['IPAMConfig'] = ipam_config
+ if driver_opt:
+ if version_lt(version, '1.32'):
+ raise errors.InvalidVersion(
+ 'DriverOpts is not supported for API version < 1.32'
+ )
+ if not isinstance(driver_opt, dict):
+ raise TypeError('driver_opt must be a dictionary')
+ self['DriverOpts'] = driver_opt
+
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):
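
The new driver_opt argument (rejected below API 1.32) lands on the wire as DriverOpts; a sketch mirroring the integration test added further down:

    import docker

    client = docker.APIClient()
    container = client.create_container(
        'busybox', 'top',
        networking_config=client.create_networking_config({
            'bridge': client.create_endpoint_config(
                driver_opt={'com.docker-py.setting': 'on'}
            )
        }),
        host_config=client.create_host_config(network_mode='bridge'),
    )
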
diff --git a/docker/types/services.py b/docker/types/services.py
index 05dda15..fe7cc26 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import (
@@ -112,16 +110,21 @@ class ContainerSpec(dict):
containers. Only used for Windows containers.
init (boolean): Run an init inside the container that forwards signals
and reaps processes.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to the
+ default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
+ the default set for the container.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
- privileges=None, isolation=None, init=None):
+ privileges=None, isolation=None, init=None, cap_add=None,
+ cap_drop=None):
self['Image'] = image
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
self['Command'] = command
self['Args'] = args
@@ -151,7 +154,7 @@ class ContainerSpec(dict):
if mounts is not None:
parsed_mounts = []
for mount in mounts:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
@@ -188,6 +191,18 @@ class ContainerSpec(dict):
if init is not None:
self['Init'] = init
+ if cap_add is not None:
+ if not isinstance(cap_add, list):
+ raise TypeError('cap_add must be a list')
+
+ self['CapabilityAdd'] = cap_add
+
+ if cap_drop is not None:
+ if not isinstance(cap_drop, list):
+ raise TypeError('cap_drop must be a list')
+
+ self['CapabilityDrop'] = cap_drop
+
class Mount(dict):
"""
@@ -224,7 +239,7 @@ class Mount(dict):
self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
- 'Unsupported mount type: "{}"'.format(type)
+ f'Unsupported mount type: "{type}"'
)
self['Type'] = type
self['ReadOnly'] = read_only
@@ -260,7 +275,7 @@ class Mount(dict):
elif type == 'tmpfs':
tmpfs_opts = {}
if tmpfs_mode:
- if not isinstance(tmpfs_mode, six.integer_types):
+ if not isinstance(tmpfs_mode, int):
raise errors.InvalidArgument(
'tmpfs_mode must be an integer'
)
@@ -280,7 +295,7 @@ class Mount(dict):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
- 'Invalid mount format "{0}"'.format(string)
+ f'Invalid mount format "{string}"'
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
@@ -347,7 +362,7 @@ def _convert_generic_resources_dict(generic_resources):
' (found {})'.format(type(generic_resources))
)
resources = []
- for kind, value in six.iteritems(generic_resources):
+ for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
@@ -443,7 +458,7 @@ class RollbackConfig(UpdateConfig):
pass
-class RestartConditionTypesEnum(object):
+class RestartConditionTypesEnum:
_values = (
'none',
'on-failure',
@@ -474,7 +489,7 @@ class RestartPolicy(dict):
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
- 'Invalid RestartPolicy condition {0}'.format(condition)
+ f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
@@ -533,7 +548,7 @@ def convert_service_ports(ports):
)
result = []
- for k, v in six.iteritems(ports):
+ for k, v in ports.items():
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
@@ -659,10 +674,12 @@ class Placement(dict):
are provided in order from highest to lowest precedence and
are expressed as ``(strategy, descriptor)`` tuples. See
:py:class:`PlacementPreference` for details.
+ maxreplicas (int): Maximum number of replicas per node
platforms (:py:class:`list` of tuple): A list of platforms
expressed as ``(arch, os)`` tuples
"""
- def __init__(self, constraints=None, preferences=None, platforms=None):
+ def __init__(self, constraints=None, preferences=None, platforms=None,
+ maxreplicas=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
@@ -671,6 +688,8 @@ class Placement(dict):
if isinstance(pref, tuple):
pref = PlacementPreference(*pref)
self['Preferences'].append(pref)
+ if maxreplicas is not None:
+ self['MaxReplicas'] = maxreplicas
if platforms:
self['Platforms'] = []
for plat in platforms:
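
Taken together, the ContainerSpec and Placement additions read like this against a swarm manager; a sketch (maxreplicas is gated to API >= 1.40 by the integration test below; the capability fields assume a daemon recent enough to honor CapabilityAdd/CapabilityDrop, and the service name is made up):

    import docker

    client = docker.APIClient()
    spec = docker.types.ContainerSpec(
        'busybox', ['top'],
        cap_add=['CAP_SYS_CHROOT'],  # serialized as CapabilityAdd
        cap_drop=['CAP_NET_RAW'],    # serialized as CapabilityDrop
    )
    task = docker.types.TaskTemplate(
        spec,
        placement=docker.types.Placement(maxreplicas=1),  # MaxReplicas
    )
    client.create_service(task, name='capability-demo')  # hypothetical name
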
diff --git a/docker/utils/build.py b/docker/utils/build.py
index 4fa5751..ac06043 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -4,8 +4,6 @@ import re
import tarfile
import tempfile
-import six
-
from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
@@ -69,7 +67,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
- extra_names = set(e[0] for e in extra_files)
+ extra_names = {e[0] for e in extra_files}
for path in files:
if path in extra_names:
# Extra files override context files with the same name
@@ -95,9 +93,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
- except IOError:
- raise IOError(
- 'Can not read file in context: {}'.format(full_path)
+ except OSError:
+ raise OSError(
+ f'Can not read file in context: {full_path}'
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
@@ -105,8 +103,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
for name, contents in extra_files:
info = tarfile.TarInfo(name)
- info.size = len(contents)
- t.addfile(info, io.BytesIO(contents.encode('utf-8')))
+ contents_encoded = contents.encode('utf-8')
+ info.size = len(contents_encoded)
+ t.addfile(info, io.BytesIO(contents_encoded))
t.close()
fileobj.seek(0)
@@ -118,12 +117,8 @@ def mkbuildcontext(dockerfile):
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
- if six.PY3:
- raise TypeError('Please use io.BytesIO to create in-memory '
- 'Dockerfiles with Python 3')
- else:
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
@@ -153,7 +148,7 @@ def walk(root, patterns, default=True):
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
-class PatternMatcher(object):
+class PatternMatcher:
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
@@ -211,13 +206,12 @@ class PatternMatcher(object):
break
if skip:
continue
- for sub in rec_walk(cur):
- yield sub
+ yield from rec_walk(cur)
return rec_walk(root)
-class Pattern(object):
+class Pattern:
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
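
The create_archive fix above matters because TarInfo.size must count encoded bytes, not characters; for non-ASCII extra files the two diverge:

    s = 'héllo'
    print(len(s))                  # 5 characters
    print(len(s.encode('utf-8')))  # 6 bytes -- the value TarInfo.size needs
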
diff --git a/docker/utils/config.py b/docker/utils/config.py
index 82a0e2a..8e24959 100644
--- a/docker/utils/config.py
+++ b/docker/utils/config.py
@@ -18,11 +18,11 @@ def find_config_file(config_path=None):
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
- log.debug("Trying paths: {0}".format(repr(paths)))
+ log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
- log.debug("Found file at path: {0}".format(path))
+ log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
@@ -57,7 +57,7 @@ def load_general_config(config_path=None):
try:
with open(config_file) as f:
return json.load(f)
- except (IOError, ValueError) as e:
+ except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index c975d4b..cf1baf4 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -27,7 +27,7 @@ def minimum_version(version):
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
- '{0} is not available for version < {1}'.format(
+ '{} is not available for version < {}'.format(
f.__name__, version
)
)
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
index cc940a2..90e9f60 100644
--- a/docker/utils/fnmatch.py
+++ b/docker/utils/fnmatch.py
@@ -108,7 +108,7 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
+ res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
index addffdf..f384175 100644
--- a/docker/utils/json_stream.py
+++ b/docker/utils/json_stream.py
@@ -1,11 +1,6 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
import json
import json.decoder
-import six
-
from ..errors import StreamParseError
@@ -20,7 +15,7 @@ def stream_as_text(stream):
instead of byte streams.
"""
for data in stream:
- if not isinstance(data, six.text_type):
+ if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
@@ -46,8 +41,8 @@ def json_stream(stream):
return split_buffer(stream, json_splitter, json_decoder.decode)
-def line_splitter(buffer, separator=u'\n'):
- index = buffer.find(six.text_type(separator))
+def line_splitter(buffer, separator='\n'):
+ index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
@@ -61,7 +56,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
of the input.
"""
splitter = splitter or line_splitter
- buffered = six.text_type('')
+ buffered = ''
for data in stream_as_text(stream):
buffered += data
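
For context, json_stream reassembles concatenated JSON objects from a text or byte stream; a sketch with an in-memory iterable standing in for a socket:

    from docker.utils.json_stream import json_stream

    chunks = ['{"status": "pull', 'ing"}\n{"status": "done"}\n']
    for obj in json_stream(iter(chunks)):
        print(obj)  # {'status': 'pulling'}, then {'status': 'done'}
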
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index a50cc02..e813936 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -3,7 +3,7 @@ import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
- r"((?P<host>[a-fA-F\d.:]+):)?" # Address
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
@@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
if not end:
return [start + proto]
if randomly_available_port:
- return ['{}-{}'.format(start, end) + proto]
+ return [f'{start}-{end}' + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
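
The PORT_SPEC change makes bracketed IPv6 host addresses parse; a sketch using the module's split_port helper, which drives this regex:

    from docker.utils.ports import split_port

    # Bracketed IPv6 host, host port 8080 mapped to container port 80/tcp.
    internal, external = split_port('[2001:db8::1]:8080:80/tcp')
    print(internal)  # ['80/tcp']
    print(external)  # [('2001:db8::1', '8080')]
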
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 7ba9505..4a2076e 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -4,8 +4,6 @@ import select
import socket as pysocket
import struct
-import six
-
try:
from ..transport import NpipeSocket
except ImportError:
@@ -27,16 +25,16 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
- if six.PY3 and not isinstance(socket, NpipeSocket):
+ if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
- if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ if isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
- except EnvironmentError as e:
+ except OSError as e:
if e.errno not in recoverable_errors:
raise
@@ -46,7 +44,7 @@ def read_exactly(socket, n):
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
- data = six.binary_type()
+ data = bytes()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
@@ -134,7 +132,7 @@ def consume_socket_output(frames, demux=False):
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
- return six.binary_type().join(frames)
+ return bytes().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
@@ -166,4 +164,4 @@ def demux_adaptor(stream_id, data):
elif stream_id == STDERR:
return (None, data)
else:
- raise ValueError('{0} is not a valid stream'.format(stream_id))
+ raise ValueError(f'{stream_id} is not a valid stream')
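
The bytes() cleanups above sit in the frame helpers; a sketch of the non-demuxed path, which simply concatenates multiplexed byte frames:

    from docker.utils.socket import consume_socket_output

    frames = [b'hello ', b'world\n']
    print(consume_socket_output(frames))  # b'hello world\n'
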
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 7819ace..f7c3dd7 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -7,27 +7,14 @@ import string
from datetime import datetime
from distutils.version import StrictVersion
-import six
-
from .. import errors
from .. import tls
+from ..constants import DEFAULT_HTTP_HOST
+from ..constants import DEFAULT_UNIX_SOCKET
+from ..constants import DEFAULT_NPIPE
+from ..constants import BYTE_UNITS
-if six.PY2:
- from urllib import splitnport
- from urlparse import urlparse
-else:
- from urllib.parse import splitnport, urlparse
-
-DEFAULT_HTTP_HOST = "127.0.0.1"
-DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
-DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
-
-BYTE_UNITS = {
- 'b': 1,
- 'k': 1024,
- 'm': 1024 * 1024,
- 'g': 1024 * 1024 * 1024
-}
+from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
@@ -46,8 +33,7 @@ def create_ipam_config(*args, **kwargs):
def decode_json_header(header):
data = base64.b64decode(header)
- if six.PY3:
- data = data.decode('utf-8')
+ data = data.decode('utf-8')
return json.loads(data)
@@ -87,7 +73,7 @@ def _convert_port_binding(binding):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
- elif isinstance(binding[0], six.string_types):
+ elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
@@ -111,7 +97,7 @@ def _convert_port_binding(binding):
def convert_port_bindings(port_bindings):
result = {}
- for k, v in six.iteritems(port_bindings):
+ for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
@@ -128,7 +114,7 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
- if isinstance(k, six.binary_type):
+ if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
@@ -139,7 +125,7 @@ def convert_volume_binds(binds):
)
bind = v['bind']
- if isinstance(bind, six.binary_type):
+ if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
@@ -150,13 +136,13 @@ def convert_volume_binds(binds):
mode = 'rw'
result.append(
- six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+ f'{k}:{bind}:{mode}'
)
else:
- if isinstance(v, six.binary_type):
+ if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
- six.text_type('{0}:{1}:rw').format(k, v)
+ f'{k}:{v}:rw'
)
return result
@@ -173,7 +159,7 @@ def convert_tmpfs_mounts(tmpfs):
result = {}
for mount in tmpfs:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
@@ -198,7 +184,7 @@ def convert_service_networks(networks):
result = []
for n in networks:
- if isinstance(n, six.string_types):
+ if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
@@ -247,14 +233,14 @@ def parse_host(addr, is_win32=False, tls=False):
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
- "Invalid bind address protocol: {}".format(addr)
+ f"Invalid bind address protocol: {addr}"
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
- 'Invalid bind address format: {}'.format(addr)
+ f'Invalid bind address format: {addr}'
)
if any([
@@ -262,7 +248,7 @@ def parse_host(addr, is_win32=False, tls=False):
parsed_url.password
]):
raise errors.DockerException(
- 'Invalid bind address format: {}'.format(addr)
+ f'Invalid bind address format: {addr}'
)
if parsed_url.path and proto == 'ssh':
@@ -299,8 +285,8 @@ def parse_host(addr, is_win32=False, tls=False):
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
- return "{}://{}".format(proto, path).rstrip('/')
- return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
+ return f"{proto}://{path}".rstrip('/')
+ return f'{proto}://{host}:{port}{path}'.rstrip('/')
def parse_devices(devices):
@@ -309,9 +295,9 @@ def parse_devices(devices):
if isinstance(device, dict):
device_list.append(device)
continue
- if not isinstance(device, six.string_types):
+ if not isinstance(device, str):
raise errors.DockerException(
- 'Invalid device type {0}'.format(type(device))
+ f'Invalid device type {type(device)}'
)
device_mapping = device.split(':')
if device_mapping:
@@ -379,13 +365,13 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
def convert_filters(filters):
result = {}
- for k, v in six.iteritems(filters):
+ for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
- str(item) if not isinstance(item, six.string_types) else item
+ str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result)
@@ -398,7 +384,7 @@ def datetime_to_timestamp(dt):
def parse_bytes(s):
- if isinstance(s, six.integer_types + (float,)):
+ if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
@@ -419,10 +405,10 @@ def parse_bytes(s):
if suffix in units.keys() or suffix.isdigit():
try:
- digits = int(digits_part)
+ digits = float(digits_part)
except ValueError:
raise errors.DockerException(
- 'Failed converting the string value for memory ({0}) to'
+ 'Failed converting the string value for memory ({}) to'
' an integer.'.format(digits_part)
)
@@ -430,7 +416,7 @@ def parse_bytes(s):
s = int(digits * units[suffix])
else:
raise errors.DockerException(
- 'The specified value for memory ({0}) should specify the'
+ 'The specified value for memory ({}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
@@ -440,9 +426,9 @@ def parse_bytes(s):
def normalize_links(links):
if isinstance(links, dict):
- links = six.iteritems(links)
+ links = iter(links.items())
- return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
+ return [f'{k}:{v}' if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
@@ -452,7 +438,7 @@ def parse_env_file(env_file):
"""
environment = {}
- with open(env_file, 'r') as f:
+ with open(env_file) as f:
for line in f:
if line[0] == '#':
@@ -468,15 +454,13 @@ def parse_env_file(env_file):
environment[k] = v
else:
raise errors.DockerException(
- 'Invalid line in environment file {0}:\n{1}'.format(
+ 'Invalid line in environment file {}:\n{}'.format(
env_file, line))
return environment
def split_command(command):
- if six.PY2 and not isinstance(command, six.binary_type):
- command = command.encode('utf-8')
return shlex.split(command)
@@ -484,22 +468,22 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
- if isinstance(value, six.binary_type):
+ if isinstance(value, bytes):
value = value.decode('utf-8')
- return u'{key}={value}'.format(key=key, value=value)
- return [format_env(*var) for var in six.iteritems(environment)]
+ return f'{key}={value}'
+ return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
- '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
]
return [
- '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
]
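
With digits_part now parsed as a float, parse_bytes accepts fractional unit values; for example:

    from docker.utils import parse_bytes

    # 1.5 * 1024**3, truncated to int by `s = int(digits * units[suffix])`
    print(parse_bytes('1.5g'))  # 1610612736
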
diff --git a/docker/version.py b/docker/version.py
index 99a8b42..4259432 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "4.1.0"
-version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
+version = "5.0.3"
+version_info = tuple(int(d) for d in version.split("-")[0].split("."))
diff --git a/requirements.txt b/requirements.txt
index 95d8fa6..34a3083 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
appdirs>=1.4.3
asn1crypto>=0.22.0
backports.ssl-match-hostname>=3.5.0.1
-cffi>=1.10.0
-cryptography>=2.3
+cffi>=1.14.4
+cryptography>=3.2
enum34>=1.1.6
idna>=2.5
ipaddress>=1.0.18
@@ -11,10 +11,6 @@ paramiko>=2.4.2
pycparser>=2.17
pyOpenSSL>=18.0.0
pyparsing>=2.2.0
-pypiwin32>=219; sys_platform == 'win32' and python_version < '3.6'
-pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
-requests>=2.20.2
-six>=1.10.0
-websocket-client>=0.40.0
-urllib3>=1.24.3
+requests>=2.25.0
+urllib3>=1.26.5
websocket-client>=0.56.0
diff --git a/setup.py b/setup.py
index c29787b..5b9945d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-from __future__ import print_function
import codecs
import os
@@ -11,23 +10,13 @@ ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'six >= 1.4.0',
'websocket-client >= 0.32.0',
'requests >= 2.14.2, != 2.18.0',
]
extras_require = {
- ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
- # While not imported explicitly, the ipaddress module is required for
- # ssl_match_hostname to verify hosts match with certificates via
- # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
- ':python_version < "3.3"': 'ipaddress >= 1.0.16',
-
# win32 APIs if on Windows (required for npipe support)
- # Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
- # on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
- ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
- ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223',
+ ':sys_platform == "win32"': 'pywin32==227',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
@@ -37,7 +26,7 @@ extras_require = {
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.2', 'idna>=2.0.0'],
# Only required when connecting using the ssh:// protocol
'ssh': ['paramiko>=2.4.2'],
@@ -72,7 +61,7 @@ setup(
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
+ python_requires='>=3.6',
zip_safe=False,
test_suite='tests',
classifiers=[
@@ -81,16 +70,15 @@ setup(
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
'Topic :: Software Development',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
- maintainer='Joffrey F',
- maintainer_email='joffrey@docker.com',
+ maintainer='Ulysses Souza',
+ maintainer_email='ulysses.souza@docker.com',
)
diff --git a/test-requirements.txt b/test-requirements.txt
index ebc74f2..585e3fd 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,3 +1,4 @@
+setuptools>=54.1.1
coverage>=4.5.2
flake8>=3.6.0
mock>=1.0.1
diff --git a/tests/helpers.py b/tests/helpers.py
index f344e1c..63cbe2e 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -11,7 +11,6 @@ import time
import docker
import paramiko
import pytest
-import six
def make_tree(dirs, files):
@@ -54,7 +53,7 @@ def requires_api_version(version):
return pytest.mark.skipif(
docker.utils.version_lt(test_version, version),
- reason="API version is too low (< {0})".format(version)
+ reason=f"API version is too low (< {version})"
)
@@ -86,7 +85,7 @@ def wait_on_condition(condition, delay=0.1, timeout=40):
def random_name():
- return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+ return f'dockerpytest_{random.getrandbits(64):x}'
def force_leave_swarm(client):
@@ -105,11 +104,11 @@ def force_leave_swarm(client):
def swarm_listen_addr():
- return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
+ return f'0.0.0.0:{random.randrange(10000, 25000)}'
def assert_cat_socket_detached_with_keys(sock, inputs):
- if six.PY3 and hasattr(sock, '_sock'):
+ if hasattr(sock, '_sock'):
sock = sock._sock
for i in inputs:
@@ -128,7 +127,7 @@ def assert_cat_socket_detached_with_keys(sock, inputs):
# of the daemon no longer cause this to raise an error.
try:
sock.sendall(b'make sure the socket is closed\n')
- except socket.error:
+ except OSError:
return
sock.sendall(b"make sure the socket is closed\n")
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
index 5712812..ef48e12 100644
--- a/tests/integration/api_build_test.py
+++ b/tests/integration/api_build_test.py
@@ -7,7 +7,6 @@ from docker import errors
from docker.utils.proxy import ProxyConfig
import pytest
-import six
from .base import BaseAPIIntegrationTest, TEST_IMG
from ..helpers import random_name, requires_api_version, requires_experimental
@@ -71,9 +70,8 @@ class BuildTest(BaseAPIIntegrationTest):
assert len(logs) > 0
def test_build_from_stringio(self):
- if six.PY3:
- return
- script = io.StringIO(six.text_type('\n').join([
+ return
+ script = io.StringIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
@@ -83,8 +81,7 @@ class BuildTest(BaseAPIIntegrationTest):
stream = self.client.build(fileobj=script)
logs = ''
for chunk in stream:
- if six.PY3:
- chunk = chunk.decode('utf-8')
+ chunk = chunk.decode('utf-8')
logs += chunk
assert logs != ''
@@ -135,8 +132,7 @@ class BuildTest(BaseAPIIntegrationTest):
self.client.wait(c)
logs = self.client.logs(c)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = logs.decode('utf-8')
assert sorted(list(filter(None, logs.split('\n')))) == sorted([
'/test/#file.txt',
@@ -339,10 +335,8 @@ class BuildTest(BaseAPIIntegrationTest):
assert self.client.inspect_image(img_name)
ctnr = self.run_container(img_name, 'cat /hosts-file')
- self.tmp_containers.append(ctnr)
logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = logs.decode('utf-8')
assert '127.0.0.1\textrahost.local.test' in logs
assert '127.0.0.1\thello.world.test' in logs
@@ -377,7 +371,7 @@ class BuildTest(BaseAPIIntegrationTest):
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
script = io.BytesIO(b'\n'.join([
b'FROM busybox',
- 'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
+ f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
]))
stream = self.client.build(
@@ -441,7 +435,7 @@ class BuildTest(BaseAPIIntegrationTest):
@requires_api_version('1.32')
@requires_experimental(until=None)
def test_build_invalid_platform(self):
- script = io.BytesIO('FROM busybox\n'.encode('ascii'))
+ script = io.BytesIO(b'FROM busybox\n')
with pytest.raises(errors.APIError) as excinfo:
stream = self.client.build(fileobj=script, platform='foobar')
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
index 9e348f3..d1622fa 100644
--- a/tests/integration/api_client_test.py
+++ b/tests/integration/api_client_test.py
@@ -72,6 +72,6 @@ class UnixconnTest(unittest.TestCase):
client.close()
del client
- assert len(w) == 0, "No warnings produced: {0}".format(
+ assert len(w) == 0, "No warnings produced: {}".format(
w[0].message
)
diff --git a/tests/integration/api_config_test.py b/tests/integration/api_config_test.py
index 0ffd767..82cb516 100644
--- a/tests/integration/api_config_test.py
+++ b/tests/integration/api_config_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import docker
import pytest
@@ -31,7 +29,7 @@ class ConfigAPITest(BaseAPIIntegrationTest):
def test_create_config_unicode_data(self):
config_id = self.client.create_config(
- 'favorite_character', u'いざよいさくや'
+ 'favorite_character', 'いざよいさくや'
)
self.tmp_configs.append(config_id)
assert 'ID' in config_id
@@ -70,3 +68,16 @@ class ConfigAPITest(BaseAPIIntegrationTest):
data = self.client.configs(filters={'name': ['favorite_character']})
assert len(data) == 1
assert data[0]['ID'] == config_id['ID']
+
+ @requires_api_version('1.37')
+ def test_create_config_with_templating(self):
+ config_id = self.client.create_config(
+ 'favorite_character', 'sakuya izayoi',
+ templating={'name': 'golang'}
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+ assert 'Templating' in data['Spec']
+ assert data['Spec']['Templating']['Name'] == 'golang'
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
index 1ba3eaa..9da2cfb 100644
--- a/tests/integration/api_container_test.py
+++ b/tests/integration/api_container_test.py
@@ -7,7 +7,6 @@ from datetime import datetime
import pytest
import requests
-import six
import docker
from .. import helpers
@@ -35,7 +34,7 @@ class ListContainersTest(BaseAPIIntegrationTest):
assert len(retrieved) == 1
retrieved = retrieved[0]
assert 'Command' in retrieved
- assert retrieved['Command'] == six.text_type('true')
+ assert retrieved['Command'] == 'true'
assert 'Image' in retrieved
assert re.search(r'alpine:.*', retrieved['Image'])
assert 'Status' in retrieved
@@ -104,13 +103,11 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container3_id)
assert self.client.wait(container3_id)['StatusCode'] == 0
- logs = self.client.logs(container3_id)
- if six.PY3:
- logs = logs.decode('utf-8')
- assert '{0}_NAME='.format(link_env_prefix1) in logs
- assert '{0}_ENV_FOO=1'.format(link_env_prefix1) in logs
- assert '{0}_NAME='.format(link_env_prefix2) in logs
- assert '{0}_ENV_FOO=1'.format(link_env_prefix2) in logs
+ logs = self.client.logs(container3_id).decode('utf-8')
+ assert f'{link_env_prefix1}_NAME=' in logs
+ assert f'{link_env_prefix1}_ENV_FOO=1' in logs
+ assert f'{link_env_prefix2}_NAME=' in logs
+ assert f'{link_env_prefix2}_ENV_FOO=1' in logs
def test_create_with_restart_policy(self):
container = self.client.create_container(
@@ -227,9 +224,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.wait(container)
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
groups = logs.strip().split(' ')
assert '1000' in groups
assert '1001' in groups
@@ -244,9 +239,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
self.client.start(container)
self.client.wait(container)
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
groups = logs.strip().split(' ')
assert '1000' in groups
@@ -273,11 +266,14 @@ class CreateContainerTest(BaseAPIIntegrationTest):
def test_invalid_log_driver_raises_exception(self):
log_config = docker.types.LogConfig(
- type='asdf-nope',
+ type='asdf',
config={}
)
- expected_msg = "logger: no log driver named 'asdf-nope' is registered"
+ expected_msgs = [
+ "logger: no log driver named 'asdf' is registered",
+ "error looking up logging plugin asdf: plugin \"asdf\" not found",
+ ]
with pytest.raises(docker.errors.APIError) as excinfo:
# raises an internal server error 500
container = self.client.create_container(
@@ -287,7 +283,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
self.client.start(container)
- assert excinfo.value.explanation == expected_msg
+ assert excinfo.value.explanation in expected_msgs
def test_valid_no_log_driver_specified(self):
log_config = docker.types.LogConfig(
@@ -491,7 +487,7 @@ class CreateContainerTest(BaseAPIIntegrationTest):
)
class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
- super(VolumeBindTest, self).setUp()
+ super().setUp()
self.mount_dest = '/mnt'
@@ -512,10 +508,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
TEST_IMG,
['ls', self.mount_dest],
)
- logs = self.client.logs(container)
-
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@@ -531,10 +524,8 @@ class VolumeBindTest(BaseAPIIntegrationTest):
TEST_IMG,
['ls', self.mount_dest],
)
- logs = self.client.logs(container)
+ logs = self.client.logs(container).decode('utf-8')
- if six.PY3:
- logs = logs.decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
@@ -551,9 +542,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
host_config=host_config
)
assert container
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@@ -570,9 +559,7 @@ class VolumeBindTest(BaseAPIIntegrationTest):
host_config=host_config
)
assert container
- logs = self.client.logs(container)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(container).decode('utf-8')
assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
@@ -631,7 +618,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
- TEST_IMG, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
+ TEST_IMG, f'sh -c "echo {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@@ -642,15 +629,14 @@ class ArchiveTest(BaseAPIIntegrationTest):
for d in strm:
destination.write(d)
destination.seek(0)
- retrieved_data = helpers.untar_file(destination, 'data.txt')
- if six.PY3:
- retrieved_data = retrieved_data.decode('utf-8')
+ retrieved_data = helpers.untar_file(destination, 'data.txt')\
+ .decode('utf-8')
assert data == retrieved_data.strip()
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
- TEST_IMG, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
+ TEST_IMG, f'sh -c "echo -n {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
@@ -669,7 +655,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
test_file.seek(0)
ctnr = self.client.create_container(
TEST_IMG,
- 'cat {0}'.format(
+ 'cat {}'.format(
os.path.join('/vol1/', os.path.basename(test_file.name))
),
volumes=['/vol1']
@@ -680,9 +666,6 @@ class ArchiveTest(BaseAPIIntegrationTest):
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
- data = data.decode('utf-8')
assert logs.strip() == data
def test_copy_directory_to_container(self):
@@ -697,9 +680,7 @@ class ArchiveTest(BaseAPIIntegrationTest):
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
- logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = self.client.logs(ctnr).decode('utf-8')
results = logs.strip().split()
assert 'a.py' in results
assert 'b.py' in results
@@ -720,7 +701,7 @@ class RenameContainerTest(BaseAPIIntegrationTest):
if version == '1.5.0':
assert name == inspect['Name']
else:
- assert '/{0}'.format(name) == inspect['Name']
+ assert f'/{name}' == inspect['Name']
class StartContainerTest(BaseAPIIntegrationTest):
@@ -826,7 +807,7 @@ class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- TEST_IMG, 'echo {0}'.format(snippet)
+ TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -840,7 +821,7 @@ class LogsTest(BaseAPIIntegrationTest):
snippet = '''Line1
Line2'''
container = self.client.create_container(
- TEST_IMG, 'echo "{0}"'.format(snippet)
+ TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -853,12 +834,12 @@ Line2'''
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- TEST_IMG, 'echo {0}'.format(snippet)
+ TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- logs = six.binary_type()
+ logs = b''
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
@@ -873,12 +854,12 @@ Line2'''
def test_logs_streaming_and_follow_and_cancel(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- TEST_IMG, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
+ TEST_IMG, f'sh -c "echo \\"{snippet}\\" && sleep 3"'
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- logs = six.binary_type()
+ logs = b''
generator = self.client.logs(id, stream=True, follow=True)
threading.Timer(1, generator.close).start()
@@ -891,7 +872,7 @@ Line2'''
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- TEST_IMG, 'echo {0}'.format(snippet)
+ TEST_IMG, f'echo {snippet}'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -904,7 +885,7 @@ Line2'''
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
- TEST_IMG, 'echo "{0}"'.format(snippet)
+ TEST_IMG, f'echo "{snippet}"'
)
id = container['Id']
self.tmp_containers.append(id)
@@ -918,7 +899,7 @@ Line2'''
def test_logs_with_until(self):
snippet = 'Shanghai Teahouse (Hong Meiling)'
container = self.client.create_container(
- TEST_IMG, 'echo "{0}"'.format(snippet)
+ TEST_IMG, f'echo "{snippet}"'
)
self.tmp_containers.append(container)
@@ -1102,6 +1083,8 @@ class PortTest(BaseAPIIntegrationTest):
class ContainerTopTest(BaseAPIIntegrationTest):
+ @pytest.mark.xfail(reason='Output of docker top depends on host distro, '
+ 'and is not formalized.')
def test_top(self):
container = self.client.create_container(
TEST_IMG, ['sleep', '60']
@@ -1112,9 +1095,7 @@ class ContainerTopTest(BaseAPIIntegrationTest):
self.client.start(container)
res = self.client.top(container)
if not IS_WINDOWS_PLATFORM:
- assert res['Titles'] == [
- 'UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD'
- ]
+ assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND']
assert len(res['Processes']) == 1
assert res['Processes'][0][-1] == 'sleep 60'
self.client.kill(container)
@@ -1122,6 +1103,8 @@ class ContainerTopTest(BaseAPIIntegrationTest):
@pytest.mark.skipif(
IS_WINDOWS_PLATFORM, reason='No psargs support on windows'
)
+ @pytest.mark.xfail(reason='Output of docker top depends on host distro, '
+ 'and is not formalized.')
def test_top_with_psargs(self):
container = self.client.create_container(
TEST_IMG, ['sleep', '60'])
@@ -1129,11 +1112,8 @@ class ContainerTopTest(BaseAPIIntegrationTest):
self.tmp_containers.append(container)
self.client.start(container)
- res = self.client.top(container, 'waux')
- assert res['Titles'] == [
- 'USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
- 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'
- ]
+ res = self.client.top(container, '-eopid,user')
+ assert res['Titles'] == ['PID', 'USER']
assert len(res['Processes']) == 1
assert res['Processes'][0][10] == 'sleep 60'
@@ -1223,7 +1203,7 @@ class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_reading_socket(self):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
- command = "printf '{0}'".format(line)
+ command = f"printf '{line}'"
container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
@@ -1507,7 +1487,7 @@ class LinkTest(BaseAPIIntegrationTest):
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
- link_name = '%s/%s' % (linked_name, link_alias)
+ link_name = f'{linked_name}/{link_alias}'
self.client.remove_container(link_name, link=True)
# Link is gone
diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py
index 554e862..4d7748f 100644
--- a/tests/integration/api_exec_test.py
+++ b/tests/integration/api_exec_test.py
@@ -239,7 +239,7 @@ class ExecDemuxTest(BaseAPIIntegrationTest):
)
def setUp(self):
- super(ExecDemuxTest, self).setUp()
+ super().setUp()
self.container = self.client.create_container(
TEST_IMG, 'cat', detach=True, stdin_open=True
)
diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py
index 2bc96ab..e30de46 100644
--- a/tests/integration/api_image_test.py
+++ b/tests/integration/api_image_test.py
@@ -7,9 +7,8 @@ import tempfile
import threading
import pytest
-import six
-from six.moves import BaseHTTPServer
-from six.moves import socketserver
+from http.server import SimpleHTTPRequestHandler
+import socketserver
import docker
@@ -33,7 +32,7 @@ class ListImagesTest(BaseAPIIntegrationTest):
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
- assert type(res1[0]) == six.text_type
+ assert type(res1[0]) == str
class PullImageTest(BaseAPIIntegrationTest):
@@ -42,9 +41,9 @@ class PullImageTest(BaseAPIIntegrationTest):
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
- res = self.client.pull('hello-world', tag='latest')
+ res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
- assert type(res) == six.text_type
+ assert type(res) == str
assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
assert 'Id' in img_info
@@ -55,7 +54,7 @@ class PullImageTest(BaseAPIIntegrationTest):
except docker.errors.APIError:
pass
stream = self.client.pull(
- 'hello-world', tag='latest', stream=True, decode=True)
+ 'hello-world', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
@@ -266,14 +265,14 @@ class ImportImageTest(BaseAPIIntegrationTest):
output = self.client.load_image(data)
assert any([
line for line in output
- if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
+ if f'Loaded image: {test_img}' in line.get('stream', '')
])
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
- class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
+ class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
@@ -285,7 +284,7 @@ class ImportImageTest(BaseAPIIntegrationTest):
thread.setDaemon(True)
thread.start()
- yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
+ yield f'http://{socket.gethostname()}:{server.server_address[1]}'
server.shutdown()
@@ -351,7 +350,7 @@ class SaveLoadImagesTest(BaseAPIIntegrationTest):
result = self.client.load_image(f.read())
success = False
- result_line = 'Loaded image: {}\n'.format(TEST_IMG)
+ result_line = f'Loaded image: {TEST_IMG}\n'
for data in result:
print(data)
if 'stream' in data:
diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py
index 0f26827..2568138 100644
--- a/tests/integration/api_network_test.py
+++ b/tests/integration/api_network_test.py
@@ -9,7 +9,7 @@ from .base import BaseAPIIntegrationTest, TEST_IMG
class TestNetworks(BaseAPIIntegrationTest):
def tearDown(self):
self.client.leave_swarm(force=True)
- super(TestNetworks, self).tearDown()
+ super().tearDown()
def create_network(self, *args, **kwargs):
net_name = random_name()
@@ -275,6 +275,27 @@ class TestNetworks(BaseAPIIntegrationTest):
assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
+ @requires_api_version('1.32')
+ def test_create_with_driveropt(self):
+ container = self.client.create_container(
+ TEST_IMG, 'top',
+ networking_config=self.client.create_networking_config(
+ {
+ 'bridge': self.client.create_endpoint_config(
+ driver_opt={'com.docker-py.setting': 'on'}
+ )
+ }
+ ),
+ host_config=self.client.create_host_config(network_mode='bridge')
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ container_data = self.client.inspect_container(container)
+ net_cfg = container_data['NetworkSettings']['Networks']['bridge']
+ assert 'DriverOpts' in net_cfg
+ assert 'com.docker-py.setting' in net_cfg['DriverOpts']
+ assert net_cfg['DriverOpts']['com.docker-py.setting'] == 'on'
+
@requires_api_version('1.22')
def test_create_with_links(self):
net_name, net_id = self.create_network()
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
index b3d93b8..fd98543 100644
--- a/tests/integration/api_secret_test.py
+++ b/tests/integration/api_secret_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import docker
import pytest
@@ -31,7 +29,7 @@ class SecretAPITest(BaseAPIIntegrationTest):
def test_create_secret_unicode_data(self):
secret_id = self.client.create_secret(
- 'favorite_character', u'いざよいさくや'
+ 'favorite_character', 'いざよいさくや'
)
self.tmp_secrets.append(secret_id)
assert 'ID' in secret_id
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
index b6b7ec5..dcf195d 100644
--- a/tests/integration/api_service_test.py
+++ b/tests/integration/api_service_test.py
@@ -1,11 +1,8 @@
-# -*- coding: utf-8 -*-
-
import random
import time
import docker
import pytest
-import six
from ..helpers import (
force_leave_swarm, requires_api_version, requires_experimental
@@ -31,10 +28,10 @@ class ServiceTest(BaseAPIIntegrationTest):
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
- super(ServiceTest, self).tearDown()
+ super().tearDown()
def get_service_name(self):
- return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+ return f'dockerpytest_{random.getrandbits(64):x}'
def get_service_container(self, service_name, attempts=20, interval=0.5,
include_stopped=False):
@@ -55,7 +52,7 @@ class ServiceTest(BaseAPIIntegrationTest):
def create_simple_service(self, name=None, labels=None):
if name:
- name = 'dockerpytest_{0}'.format(name)
+ name = f'dockerpytest_{name}'
else:
name = self.get_service_name()
@@ -150,7 +147,7 @@ class ServiceTest(BaseAPIIntegrationTest):
else:
break
- if six.PY3:
+ if log_line is not None:
log_line = log_line.decode('utf-8')
assert 'hello\n' in log_line
@@ -404,20 +401,20 @@ class ServiceTest(BaseAPIIntegrationTest):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(
- container_spec, placement=['node.id=={}'.format(node_id)]
+ container_spec, placement=[f'node.id=={node_id}']
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
- {'Constraints': ['node.id=={}'.format(node_id)]})
+ {'Constraints': [f'node.id=={node_id}']})
def test_create_service_with_placement_object(self):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
placemt = docker.types.Placement(
- constraints=['node.id=={}'.format(node_id)]
+ constraints=[f'node.id=={node_id}']
)
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
@@ -471,6 +468,19 @@ class ServiceTest(BaseAPIIntegrationTest):
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+ @requires_api_version('1.40')
+ def test_create_service_with_placement_maxreplicas(self):
+ container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
+ placemt = docker.types.Placement(maxreplicas=1)
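+        # maxreplicas (API 1.40+) caps how many tasks of the service may
+        # be scheduled on a single node; it should round-trip via inspect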
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
def test_create_service_with_endpoint_spec(self):
container_spec = docker.types.ContainerSpec(TEST_IMG, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
@@ -496,7 +506,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
- self.fail('Invalid port specification: {0}'.format(port))
+ self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@@ -658,14 +668,14 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /run/secrets/{0}'.format(secret_name)
+ container, f'cat /run/secrets/{secret_name}'
)
assert self.client.exec_start(exec_id) == secret_data
@requires_api_version('1.25')
def test_create_service_with_unicode_secret(self):
secret_name = 'favorite_touhou'
- secret_data = u'東方花映塚'
+ secret_data = '東方花映塚'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
@@ -683,7 +693,7 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /run/secrets/{0}'.format(secret_name)
+ container, f'cat /run/secrets/{secret_name}'
)
container_secret = self.client.exec_start(exec_id)
container_secret = container_secret.decode('utf-8')
@@ -710,14 +720,14 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /{0}'.format(config_name)
+ container, f'cat /{config_name}'
)
assert self.client.exec_start(exec_id) == config_data
@requires_api_version('1.30')
def test_create_service_with_unicode_config(self):
config_name = 'favorite_touhou'
- config_data = u'東方花映塚'
+ config_data = '東方花映塚'
config_id = self.client.create_config(config_name, config_data)
self.tmp_configs.append(config_id)
config_ref = docker.types.ConfigReference(config_id, config_name)
@@ -735,7 +745,7 @@ class ServiceTest(BaseAPIIntegrationTest):
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
- container, 'cat /{0}'.format(config_name)
+ container, f'cat /{config_name}'
)
container_config = self.client.exec_start(exec_id)
container_config = container_config.decode('utf-8')
@@ -1124,7 +1134,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
- self.fail('Invalid port specification: {0}'.format(port))
+ self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@@ -1151,7 +1161,7 @@ class ServiceTest(BaseAPIIntegrationTest):
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
- self.fail('Invalid port specification: {0}'.format(port))
+ self.fail(f'Invalid port specification: {port}')
assert len(ports) == 3
@@ -1346,3 +1356,33 @@ class ServiceTest(BaseAPIIntegrationTest):
self.client.update_service(*args, **kwargs)
else:
raise
+
+ @requires_api_version('1.41')
+ def test_create_service_cap_add(self):
+ name = self.get_service_name()
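+        # cap_add should surface as ContainerSpec.CapabilityAdd in the
+        # inspected service spec (API 1.41+)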
+ container_spec = docker.types.ContainerSpec(
+ TEST_IMG, ['echo', 'hello'], cap_add=['CAP_SYSLOG']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+ spec = services[0]['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'CAP_SYSLOG' in spec['CapabilityAdd']
+
+ @requires_api_version('1.41')
+ def test_create_service_cap_drop(self):
+ name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ TEST_IMG, ['echo', 'hello'], cap_drop=['CAP_SYSLOG']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+ spec = services[0]['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'CAP_SYSLOG' in spec['CapabilityDrop']
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
index f1cbc26..48c0592 100644
--- a/tests/integration/api_swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -8,7 +8,7 @@ from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
- super(SwarmTest, self).setUp()
+ super().setUp()
force_leave_swarm(self.client)
self._unlock_key = None
@@ -19,7 +19,7 @@ class SwarmTest(BaseAPIIntegrationTest):
except docker.errors.APIError:
pass
force_leave_swarm(self.client)
- super(SwarmTest, self).tearDown()
+ super().tearDown()
@requires_api_version('1.24')
def test_init_swarm_simple(self):
diff --git a/tests/integration/base.py b/tests/integration/base.py
index a7613f6..031079c 100644
--- a/tests/integration/base.py
+++ b/tests/integration/base.py
@@ -75,11 +75,11 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
"""
def setUp(self):
- super(BaseAPIIntegrationTest, self).setUp()
+ super().setUp()
self.client = self.get_client_instance()
def tearDown(self):
- super(BaseAPIIntegrationTest, self).tearDown()
+ super().tearDown()
self.client.close()
@staticmethod
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index ec48835..ae94595 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import sys
import warnings
@@ -17,11 +15,11 @@ def setup_test_session():
try:
c.inspect_image(TEST_IMG)
except docker.errors.NotFound:
- print("\npulling {0}".format(TEST_IMG), file=sys.stderr)
+ print(f"\npulling {TEST_IMG}", file=sys.stderr)
for data in c.pull(TEST_IMG, stream=True, decode=True):
status = data.get("status")
progress = data.get("progress")
- detail = "{0} - {1}".format(status, progress)
+ detail = f"{status} - {progress}"
print(detail, file=sys.stderr)
# Double make sure we now have busybox
diff --git a/tests/integration/context_api_test.py b/tests/integration/context_api_test.py
new file mode 100644
index 0000000..a2a12a5
--- /dev/null
+++ b/tests/integration/context_api_test.py
@@ -0,0 +1,59 @@
+import os
+import tempfile
+import pytest
+from docker import errors
+from docker.context import ContextAPI
+from docker.tls import TLSConfig
+from .base import BaseAPIIntegrationTest
+
+
+class ContextLifecycleTest(BaseAPIIntegrationTest):
+ def test_lifecycle(self):
+ assert ContextAPI.get_context().Name == "default"
+ assert not ContextAPI.get_context("test")
+ assert ContextAPI.get_current_context().Name == "default"
+
+ dirpath = tempfile.mkdtemp()
+ ca = tempfile.NamedTemporaryFile(
+ prefix=os.path.join(dirpath, "ca.pem"), mode="r")
+ cert = tempfile.NamedTemporaryFile(
+ prefix=os.path.join(dirpath, "cert.pem"), mode="r")
+ key = tempfile.NamedTemporaryFile(
+ prefix=os.path.join(dirpath, "key.pem"), mode="r")
+
+        # create context 'test'
+ docker_tls = TLSConfig(
+ client_cert=(cert.name, key.name),
+ ca_cert=ca.name)
+ ContextAPI.create_context(
+ "test", tls_cfg=docker_tls)
+
+ # check for a context 'test' in the context store
+ assert any([ctx.Name == "test" for ctx in ContextAPI.contexts()])
+ # retrieve a context object for 'test'
+ assert ContextAPI.get_context("test")
+ # remove context
+ ContextAPI.remove_context("test")
+ with pytest.raises(errors.ContextNotFound):
+ ContextAPI.inspect_context("test")
+ # check there is no 'test' context in store
+ assert not ContextAPI.get_context("test")
+
+ ca.close()
+ key.close()
+ cert.close()
+
+ def test_context_remove(self):
+ ContextAPI.create_context("test")
+ assert ContextAPI.inspect_context("test")["Name"] == "test"
+
+ ContextAPI.remove_context("test")
+ with pytest.raises(errors.ContextNotFound):
+ ContextAPI.inspect_context("test")
+
+ def test_load_context_without_orchestrator(self):
+ ContextAPI.create_context("test")
+ ctx = ContextAPI.get_context("test")
+ assert ctx
+ assert ctx.Name == "test"
+ assert ctx.Orchestrator is None
diff --git a/tests/integration/credentials/store_test.py b/tests/integration/credentials/store_test.py
index dd543e2..d0cfd54 100644
--- a/tests/integration/credentials/store_test.py
+++ b/tests/integration/credentials/store_test.py
@@ -3,7 +3,6 @@ import random
import sys
import pytest
-import six
from distutils.spawn import find_executable
from docker.credentials import (
@@ -12,7 +11,7 @@ from docker.credentials import (
)
-class TestStore(object):
+class TestStore:
def teardown_method(self):
for server in self.tmp_keys:
try:
@@ -33,7 +32,7 @@ class TestStore(object):
self.store = Store(DEFAULT_OSX_STORE)
def get_random_servername(self):
- res = 'pycreds_test_{:x}'.format(random.getrandbits(32))
+ res = f'pycreds_test_{random.getrandbits(32):x}'
self.tmp_keys.append(res)
return res
@@ -61,7 +60,7 @@ class TestStore(object):
def test_unicode_strings(self):
key = self.get_random_servername()
-        key = six.u(key)
self.store.store(server=key, username='user', secret='pass')
data = self.store.get(key)
assert data
diff --git a/tests/integration/credentials/utils_test.py b/tests/integration/credentials/utils_test.py
index ad55f32..d7b2a1a 100644
--- a/tests/integration/credentials/utils_test.py
+++ b/tests/integration/credentials/utils_test.py
@@ -5,7 +5,7 @@ from docker.credentials.utils import create_environment_dict
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
@mock.patch.dict(os.environ)
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
index 375d972..94aa201 100644
--- a/tests/integration/models_images_test.py
+++ b/tests/integration/models_images_test.py
@@ -13,8 +13,8 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_build(self):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(fileobj=io.BytesIO(
- "FROM alpine\n"
- "CMD echo hello world".encode('ascii')
+ b"FROM alpine\n"
+ b"CMD echo hello world"
))
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"hello world\n"
@@ -24,8 +24,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.BuildError) as cm:
client.images.build(fileobj=io.BytesIO(
- "FROM alpine\n"
- "RUN exit 1".encode('ascii')
+ b"FROM alpine\n"
+ b"RUN exit 1"
))
assert (
"The command '/bin/sh -c exit 1' returned a non-zero code: 1"
@@ -36,8 +36,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(
tag='some-tag', fileobj=io.BytesIO(
- "FROM alpine\n"
- "CMD echo hello world".encode('ascii')
+ b"FROM alpine\n"
+ b"CMD echo hello world"
)
)
self.tmp_imgs.append(image.id)
@@ -47,8 +47,8 @@ class ImageCollectionTest(BaseIntegrationTest):
client = docker.from_env(version=TEST_API_VERSION)
image, _ = client.images.build(
tag='dup-txt-tag', fileobj=io.BytesIO(
- "FROM alpine\n"
- "CMD echo Successfully built abcd1234".encode('ascii')
+ b"FROM alpine\n"
+ b"CMD echo Successfully built abcd1234"
)
)
self.tmp_imgs.append(image.id)
@@ -86,9 +86,11 @@ class ImageCollectionTest(BaseIntegrationTest):
def test_pull_multiple(self):
client = docker.from_env(version=TEST_API_VERSION)
- images = client.images.pull('hello-world')
- assert len(images) == 1
- assert 'hello-world:latest' in images[0].attrs['RepoTags']
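+        # all_tags=True pulls every tag of the repository and returns a
+        # list of Image objects rather than a single image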
+ images = client.images.pull('hello-world', all_tags=True)
+ assert len(images) >= 1
+ assert any([
+ 'hello-world:latest' in img.attrs['RepoTags'] for img in images
+ ])
def test_load_error(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -117,7 +119,7 @@ class ImageCollectionTest(BaseIntegrationTest):
self.tmp_imgs.append(additional_tag)
image.reload()
with tempfile.TemporaryFile() as f:
- stream = image.save(named='{}:latest'.format(additional_tag))
+ stream = image.save(named=f'{additional_tag}:latest')
for chunk in stream:
f.write(chunk)
@@ -127,7 +129,7 @@ class ImageCollectionTest(BaseIntegrationTest):
assert len(result) == 1
assert result[0].id == image.id
- assert '{}:latest'.format(additional_tag) in result[0].tags
+ assert f'{additional_tag}:latest' in result[0].tags
def test_save_name_error(self):
client = docker.from_env(version=TEST_API_VERSION)
@@ -141,7 +143,7 @@ class ImageTest(BaseIntegrationTest):
def test_tag_and_remove(self):
repo = 'dockersdk.tests.images.test_tag'
tag = 'some-tag'
- identifier = '{}:{}'.format(repo, tag)
+ identifier = f'{repo}:{tag}'
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
index 36caa85..982842b 100644
--- a/tests/integration/models_services_test.py
+++ b/tests/integration/models_services_test.py
@@ -333,3 +333,41 @@ class ServiceTest(unittest.TestCase):
assert service.force_update()
service.reload()
assert service.version > initial_version
+
+ @helpers.requires_api_version('1.41')
+ def test_create_cap_add(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ labels={'foo': 'bar'},
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'},
+ cap_add=["CAP_SYSLOG"]
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+ assert "CAP_SYSLOG" in container_spec["CapabilityAdd"]
+
+ @helpers.requires_api_version('1.41')
+ def test_create_cap_drop(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ labels={'foo': 'bar'},
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'},
+ cap_drop=["CAP_SYSLOG"]
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+ assert "CAP_SYSLOG" in container_spec["CapabilityDrop"]
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index a63883c..deb9aff 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -2,7 +2,6 @@ import io
import random
import docker
-import six
from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
@@ -39,8 +38,7 @@ class TestRegressions(BaseAPIIntegrationTest):
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
- if six.PY3:
- logs = logs.decode('utf-8')
+ logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
@@ -56,10 +54,10 @@ class TestRegressions(BaseAPIIntegrationTest):
self.client.start(ctnr)
assert self.client.port(
ctnr, 2000
- )[0]['HostPort'] == six.text_type(tcp_port)
+ )[0]['HostPort'] == str(tcp_port)
assert self.client.port(
ctnr, '2000/tcp'
- )[0]['HostPort'] == six.text_type(tcp_port)
+ )[0]['HostPort'] == str(tcp_port)
assert self.client.port(
ctnr, '2000/udp'
- )[0]['HostPort'] == six.text_type(udp_port)
+ )[0]['HostPort'] == str(udp_port)
diff --git a/tests/ssh/__init__.py b/tests/ssh/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/ssh/__init__.py
diff --git a/tests/ssh/api_build_test.py b/tests/ssh/api_build_test.py
new file mode 100644
index 0000000..ef48e12
--- /dev/null
+++ b/tests/ssh/api_build_test.py
@@ -0,0 +1,590 @@
+import io
+import os
+import shutil
+import tempfile
+
+from docker import errors
+from docker.utils.proxy import ProxyConfig
+
+import pytest
+
+from .base import BaseAPIIntegrationTest, TEST_IMG
+from ..helpers import random_name, requires_api_version, requires_experimental
+
+
+class BuildTest(BaseAPIIntegrationTest):
+ def test_build_with_proxy(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
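+        # The client's proxy configuration should be injected into the
+        # build environment; each RUN step greps for the expected value.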
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=a"',
+ 'RUN env | grep "ftp_proxy=a"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(fileobj=script, decode=True)
+
+ def test_build_with_proxy_and_buildargs(self):
+ self.client._proxy_configs = ProxyConfig(
+ ftp='a', http='b', https='c', no_proxy='d'
+ )
+
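+        # Explicit buildargs should take precedence over the client's
+        # proxy configuration for same-named variables (FTP_PROXY here).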
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN env | grep "FTP_PROXY=XXX"',
+ 'RUN env | grep "ftp_proxy=xxx"',
+ 'RUN env | grep "HTTP_PROXY=b"',
+ 'RUN env | grep "http_proxy=b"',
+ 'RUN env | grep "HTTPS_PROXY=c"',
+ 'RUN env | grep "https_proxy=c"',
+ 'RUN env | grep "NO_PROXY=d"',
+ 'RUN env | grep "no_proxy=d"',
+ ]).encode('ascii'))
+
+ self.client.build(
+ fileobj=script,
+ decode=True,
+ buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'}
+ )
+
+ def test_build_streaming(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+ stream = self.client.build(fileobj=script, decode=True)
+ logs = []
+ for chunk in stream:
+ logs.append(chunk)
+ assert len(logs) > 0
+
+ def test_build_from_stringio(self):
+        return  # test currently disabled via early return
+ script = io.StringIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]))
+ stream = self.client.build(fileobj=script)
+ logs = ''
+ for chunk in stream:
+ chunk = chunk.decode('utf-8')
+ logs += chunk
+ assert logs != ''
+
+ def test_build_with_dockerignore(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write("\n".join([
+ 'ignored',
+ 'Dockerfile',
+ '.dockerignore',
+ '!ignored/subdir/excepted-file',
+            '',  # empty line
+ '#*', # comment line
+ ]))
+
+ with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
+ f.write('this file should not be ignored')
+
+ subdir = os.path.join(base_dir, 'ignored', 'subdir')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'file'), 'w') as f:
+ f.write("this file should be ignored")
+
+ with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ tag = 'docker-py-test-build-with-dockerignore'
+ stream = self.client.build(
+ path=base_dir,
+ tag=tag,
+ )
+ for chunk in stream:
+ pass
+
+ c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
+ self.client.start(c)
+ self.client.wait(c)
+ logs = self.client.logs(c)
+
+ logs = logs.decode('utf-8')
+
+ assert sorted(list(filter(None, logs.split('\n')))) == sorted([
+ '/test/#file.txt',
+ '/test/ignored/subdir/excepted-file',
+ '/test/not-ignored'
+ ])
+
+ def test_build_with_buildargs(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'ARG test',
+ 'USER $test'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
+ )
+ self.tmp_imgs.append('buildargs')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('buildargs')
+ assert info['Config']['User'] == 'OK'
+
+ @requires_api_version('1.22')
+ def test_build_shmsize(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Hello, World!\'"',
+ ]).encode('ascii'))
+
+ tag = 'shmsize'
+ shmsize = 134217728
+
+ stream = self.client.build(
+ fileobj=script, tag=tag, shmsize=shmsize
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ # There is currently no way to get the shmsize
+ # that was used to build the image
+
+ @requires_api_version('1.24')
+ def test_build_isolation(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+        'CMD sh -c "echo \'Deaf To All But The Song\'"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='isolation',
+ isolation='default'
+ )
+
+ for chunk in stream:
+ pass
+
+ @requires_api_version('1.23')
+ def test_build_labels(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ ]).encode('ascii'))
+
+ labels = {'test': 'OK'}
+
+ stream = self.client.build(
+ fileobj=script, tag='labels', labels=labels
+ )
+ self.tmp_imgs.append('labels')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('labels')
+ assert info['Config']['Labels'] == labels
+
+ @requires_api_version('1.25')
+ def test_build_with_cache_from(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'ENV FOO=bar',
+ 'RUN touch baz',
+ 'RUN touch bax',
+ ]).encode('ascii'))
+
+ stream = self.client.build(fileobj=script, tag='build1')
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['build1'],
+ decode=True
+ )
+ self.tmp_imgs.append('build2')
+ counter = 0
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 3
+ self.client.remove_image('build2')
+
+ counter = 0
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['nosuchtag'],
+ decode=True
+ )
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 0
+
+ @requires_api_version('1.29')
+ def test_build_container_with_target(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox as first',
+ 'RUN mkdir -p /tmp/test',
+ 'RUN touch /tmp/silence.tar.gz',
+ 'FROM alpine:latest',
+        'WORKDIR /root/',
+ 'COPY --from=first /tmp/silence.tar.gz .',
+ 'ONBUILD RUN echo "This should not be in the final image"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, target='first', tag='build1'
+ )
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('build1')
+ assert not info['Config']['OnBuild']
+
+ @requires_api_version('1.25')
+ def test_build_with_network_mode(self):
+ # Set up pingable endpoint on custom network
+ network = self.client.create_network(random_name())['Id']
+ self.tmp_networks.append(network)
+ container = self.client.create_container(TEST_IMG, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.connect_container_to_network(
+ container, network, aliases=['pingtarget.docker']
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 pingtarget.docker'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, network_mode=network,
+ tag='dockerpytest_customnetbuild'
+ )
+
+ self.tmp_imgs.append('dockerpytest_customnetbuild')
+ for chunk in stream:
+ pass
+
+ assert self.client.inspect_image('dockerpytest_customnetbuild')
+
+ script.seek(0)
+ stream = self.client.build(
+ fileobj=script, network_mode='none',
+ tag='dockerpytest_nonebuild', nocache=True, decode=True
+ )
+
+ self.tmp_imgs.append('dockerpytest_nonebuild')
+ logs = [chunk for chunk in stream]
+ assert 'errorDetail' in logs[-1]
+ assert logs[-1]['errorDetail']['code'] == 1
+
+ with pytest.raises(errors.NotFound):
+ self.client.inspect_image('dockerpytest_nonebuild')
+
+ @requires_api_version('1.27')
+ def test_build_with_extra_hosts(self):
+ img_name = 'dockerpytest_extrahost_build'
+ self.tmp_imgs.append(img_name)
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 hello.world.test',
+ 'RUN ping -c1 extrahost.local.test',
+ 'RUN cp /etc/hosts /hosts-file'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag=img_name,
+ extra_hosts={
+ 'extrahost.local.test': '127.0.0.1',
+ 'hello.world.test': '127.0.0.1',
+ }, decode=True
+ )
+ for chunk in stream:
+ if 'errorDetail' in chunk:
+ pytest.fail(chunk)
+
+ assert self.client.inspect_image(img_name)
+ ctnr = self.run_container(img_name, 'cat /hosts-file')
+ logs = self.client.logs(ctnr)
+ logs = logs.decode('utf-8')
+ assert '127.0.0.1\textrahost.local.test' in logs
+ assert '127.0.0.1\thello.world.test' in logs
+
+ @requires_experimental(until=None)
+ @requires_api_version('1.25')
+ def test_build_squash(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN echo blah > /file_1',
+ 'RUN echo blahblah > /file_2',
+ 'RUN echo blahblahblah > /file_3'
+ ]).encode('ascii'))
+
+ def build_squashed(squash):
+ tag = 'squash' if squash else 'nosquash'
+ stream = self.client.build(
+ fileobj=script, tag=tag, squash=squash
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ return self.client.inspect_image(tag)
+
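+        # busybox contributes one base layer and each RUN step adds one
+        # more (4 total); squashing collapses the RUN layers into one (2)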
+ non_squashed = build_squashed(False)
+ squashed = build_squashed(True)
+ assert len(non_squashed['RootFS']['Layers']) == 4
+ assert len(squashed['RootFS']['Layers']) == 2
+
+ def test_build_stderr_data(self):
+ control_chars = ['\x1b[91m', '\x1b[0m']
+ snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
+ script = io.BytesIO(b'\n'.join([
+ b'FROM busybox',
+ f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
+ ]))
+
+ stream = self.client.build(
+ fileobj=script, decode=True, nocache=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk.get('stream'))
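+        # The daemon wraps stderr build output in ANSI color escapes, so
+        # the expected log line is the snippet wrapped in red codes.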
+        expected = f'{control_chars[0]}{snippet}\n{control_chars[1]}'
+ assert any([line == expected for line in lines])
+
+ def test_build_gzip_encoding(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True,
+ gzip=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_with_dockerfile_empty_lines(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('\n'.join([
+ ' ',
+ '',
+ '\t\t',
+ '\t ',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_gzip_custom_encoding(self):
+ with pytest.raises(errors.DockerException):
+ self.client.build(path='.', gzip=True, encoding='text/html')
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_build_invalid_platform(self):
+ script = io.BytesIO(b'FROM busybox\n')
+
+ with pytest.raises(errors.APIError) as excinfo:
+ stream = self.client.build(fileobj=script, platform='foobar')
+ for _ in stream:
+ pass
+
+        # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
+
+ def test_build_out_of_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('.dockerignore\n')
+ df_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, df_dir)
+ df_name = os.path.join(df_dir, 'Dockerfile')
+ with open(df_name, 'wb') as df:
+ df.write(('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ])).encode('utf-8'))
+ df.flush()
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=df_name, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 3
+ assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
+
+ def test_build_in_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
+
+ def test_build_in_context_nested_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ subdir = os.path.join(base_dir, 'hello', 'world')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='hello/world/custom.dockerfile',
+ tag=img_name, decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'hello']
+ ) == sorted(lsdata)
+
+ def test_build_in_context_abs_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile')
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(abs_dockerfile_path, 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
+
+ @requires_api_version('1.31')
+ @pytest.mark.xfail(
+ True,
+ reason='Currently fails on 18.09: '
+ 'https://github.com/moby/moby/issues/37920'
+ )
+ def test_prune_builds(self):
+ prune_result = self.client.prune_builds()
+ assert 'SpaceReclaimed' in prune_result
+ assert isinstance(prune_result['SpaceReclaimed'], int)
diff --git a/tests/ssh/base.py b/tests/ssh/base.py
new file mode 100644
index 0000000..4825227
--- /dev/null
+++ b/tests/ssh/base.py
@@ -0,0 +1,130 @@
+import os
+import shutil
+import unittest
+
+import docker
+from .. import helpers
+from docker.utils import kwargs_from_env
+
+TEST_IMG = 'alpine:3.10'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
+
+
+class BaseIntegrationTest(unittest.TestCase):
+ """
+ A base class for integration test cases. It cleans up the Docker server
+ after itself.
+ """
+
+ def setUp(self):
+ self.tmp_imgs = []
+ self.tmp_containers = []
+ self.tmp_folders = []
+ self.tmp_volumes = []
+ self.tmp_networks = []
+ self.tmp_plugins = []
+ self.tmp_secrets = []
+ self.tmp_configs = []
+
+ def tearDown(self):
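+        # use_ssh_client=True makes the SSH transport shell out to the
+        # local 'ssh' binary instead of using paramiko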
+ client = docker.from_env(version=TEST_API_VERSION, use_ssh_client=True)
+ try:
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True, v=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+ finally:
+ client.close()
+
+
+class BaseAPIIntegrationTest(BaseIntegrationTest):
+ """
+ A test case for `APIClient` integration tests. It sets up an `APIClient`
+ as `self.client`.
+ """
+ @classmethod
+ def setUpClass(cls):
+ cls.client = cls.get_client_instance()
+ cls.client.pull(TEST_IMG)
+
+ def tearDown(self):
+ super().tearDown()
+ self.client.close()
+
+ @staticmethod
+ def get_client_instance():
+ return docker.APIClient(
+ version=TEST_API_VERSION,
+ timeout=60,
+ use_ssh_client=True,
+ **kwargs_from_env()
+ )
+
+ @staticmethod
+ def _init_swarm(client, **kwargs):
+ return client.init_swarm(
+ '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
+ )
+
+ def run_container(self, *args, **kwargs):
+ container = self.client.create_container(*args, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+
+ if exitcode != 0:
+ output = self.client.logs(container)
+            raise Exception(
+                f"Container exited with code {exitcode}:\n{output}")
+
+ return container
+
+ def create_and_start(self, image=TEST_IMG, command='top', **kwargs):
+ container = self.client.create_container(
+ image=image, command=command, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ return container
+
+ def execute(self, container, cmd, exit_code=0, **kwargs):
+ exc = self.client.exec_create(container, cmd, **kwargs)
+ output = self.client.exec_start(exc)
+ actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
+ msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
+ " ".join(cmd), exit_code, actual_exit_code, output)
+ assert actual_exit_code == exit_code, msg
+
+ def init_swarm(self, **kwargs):
+ return self._init_swarm(self.client, **kwargs)
diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py
index a7e183c..1ebd37d 100644
--- a/tests/unit/api_container_test.py
+++ b/tests/unit/api_container_test.py
@@ -1,24 +1,22 @@
-# -*- coding: utf-8 -*-
-
import datetime
import json
import signal
import docker
+from docker.api import APIClient
import pytest
-import six
from . import fake_api
from ..helpers import requires_api_version
from .api_test import (
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
- fake_inspect_container
+ fake_inspect_container, url_base
)
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
def fake_inspect_container_tty(self, container):
@@ -767,10 +765,71 @@ class CreateContainerTest(BaseAPIClientTest):
assert args[1]['headers'] == {'Content-Type': 'application/json'}
assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+ def test_create_container_with_device_requests(self):
+ client = APIClient(version='1.40')
+ fake_api.fake_responses.setdefault(
+ f'{fake_api.prefix}/v1.40/containers/create',
+ fake_api.post_fake_create_container,
+ )
+ client.create_container(
+ 'busybox', 'true', host_config=client.create_host_config(
+ device_requests=[
+ {
+ 'device_ids': [
+ '0',
+ 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ ]
+ },
+ {
+ 'driver': 'nvidia',
+ 'Count': -1,
+ 'capabilities': [
+ ['gpu', 'utility']
+ ],
+ 'options': {
+ 'key': 'value'
+ }
+ }
+ ]
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_base + 'v1.40/' + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = client.create_host_config()
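+        # the client should normalize each request to the full
+        # DeviceRequest shape, filling unset fields with empty defaults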
+ expected_payload['HostConfig']['DeviceRequests'] = [
+ {
+ 'Driver': '',
+ 'Count': 0,
+ 'DeviceIDs': [
+ '0',
+ 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ ],
+ 'Capabilities': [],
+ 'Options': {}
+ },
+ {
+ 'Driver': 'nvidia',
+ 'Count': -1,
+ 'DeviceIDs': [],
+ 'Capabilities': [
+ ['gpu', 'utility']
+ ],
+ 'Options': {
+ 'key': 'value'
+ }
+ }
+ ]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+ assert set(args[1]['headers']) <= {'Content-Type', 'User-Agent'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
def test_create_container_with_labels_dict(self):
labels_dict = {
- six.text_type('foo'): six.text_type('1'),
- six.text_type('bar'): six.text_type('2'),
+ 'foo': '1',
+ 'bar': '2',
}
self.client.create_container(
@@ -786,12 +845,12 @@ class CreateContainerTest(BaseAPIClientTest):
def test_create_container_with_labels_list(self):
labels_list = [
- six.text_type('foo'),
- six.text_type('bar'),
+ 'foo',
+ 'bar',
]
labels_dict = {
- six.text_type('foo'): six.text_type(),
- six.text_type('bar'): six.text_type(),
+ 'foo': '',
+ 'bar': '',
}
self.client.create_container(
@@ -951,11 +1010,11 @@ class CreateContainerTest(BaseAPIClientTest):
def test_create_container_with_unicode_envvars(self):
envvars_dict = {
- 'foo': u'☃',
+ 'foo': '☃',
}
expected = [
- u'foo=☃'
+ 'foo=☃'
]
self.client.create_container(
@@ -1076,7 +1135,7 @@ class ContainerTest(BaseAPIClientTest):
stream=False
)
- assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
+ assert logs == b'Flowering Nights\n(Sakuya Iyazoi)\n'
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
@@ -1092,7 +1151,7 @@ class ContainerTest(BaseAPIClientTest):
stream=False
)
- assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
+ assert logs == b'Flowering Nights\n(Sakuya Iyazoi)\n'
def test_log_streaming(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
diff --git a/tests/unit/api_exec_test.py b/tests/unit/api_exec_test.py
index a9d2dd5..4504250 100644
--- a/tests/unit/api_exec_test.py
+++ b/tests/unit/api_exec_test.py
@@ -11,7 +11,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
args = fake_request.call_args
- assert 'POST' == args[0][0], url_prefix + 'containers/{0}/exec'.format(
+ assert 'POST' == args[0][0], url_prefix + 'containers/{}/exec'.format(
fake_api.FAKE_CONTAINER_ID
)
@@ -32,7 +32,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_start(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
- assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ assert args[0][1] == url_prefix + 'exec/{}/start'.format(
fake_api.FAKE_EXEC_ID
)
@@ -51,7 +51,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
args = fake_request.call_args
- assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ assert args[0][1] == url_prefix + 'exec/{}/start'.format(
fake_api.FAKE_EXEC_ID
)
@@ -68,7 +68,7 @@ class ExecTest(BaseAPIClientTest):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
- assert args[0][1] == url_prefix + 'exec/{0}/json'.format(
+ assert args[0][1] == url_prefix + 'exec/{}/json'.format(
fake_api.FAKE_EXEC_ID
)
@@ -77,7 +77,7 @@ class ExecTest(BaseAPIClientTest):
fake_request.assert_called_with(
'POST',
- url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
+ url_prefix + f'exec/{fake_api.FAKE_EXEC_ID}/resize',
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py
index 1e2315d..843c11b 100644
--- a/tests/unit/api_image_test.py
+++ b/tests/unit/api_image_test.py
@@ -11,7 +11,7 @@ from .api_test import (
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
class ImageTest(BaseAPIClientTest):
@@ -26,7 +26,18 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 0, 'all': 1},
+ params={'only_ids': 0, 'all': 1},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_images_name(self):
+ self.client.images('foo:bar')
+
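+        # the name argument is sent as a "reference" filter now that the
+        # deprecated "filter" query parameter is gone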
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/json',
+ params={'only_ids': 0, 'all': 0,
+ 'filters': '{"reference": ["foo:bar"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -36,7 +47,7 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 1, 'all': 1},
+ params={'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -46,7 +57,7 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 1, 'all': 0},
+ params={'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -56,7 +67,7 @@ class ImageTest(BaseAPIClientTest):
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
- params={'filter': None, 'only_ids': 0, 'all': 0,
+ params={'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@@ -67,7 +78,7 @@ class ImageTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][1] == url_prefix + 'images/create'
assert args[1]['params'] == {
- 'tag': None, 'fromImage': 'joffrey/test001'
+ 'tag': 'latest', 'fromImage': 'joffrey/test001'
}
assert not args[1]['stream']
@@ -77,7 +88,7 @@ class ImageTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][1] == url_prefix + 'images/create'
assert args[1]['params'] == {
- 'tag': None, 'fromImage': 'joffrey/test001'
+ 'tag': 'latest', 'fromImage': 'joffrey/test001'
}
assert args[1]['stream']
diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py
index c78554d..84d6544 100644
--- a/tests/unit/api_network_test.py
+++ b/tests/unit/api_network_test.py
@@ -1,14 +1,12 @@
import json
-import six
-
from .api_test import BaseAPIClientTest, url_prefix, response
from docker.types import IPAMConfig, IPAMPool
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
class NetworkTest(BaseAPIClientTest):
@@ -103,16 +101,16 @@ class NetworkTest(BaseAPIClientTest):
self.client.remove_network(network_id)
args = delete.call_args
- assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+ assert args[0][0] == url_prefix + f'networks/{network_id}'
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
network_data = {
- six.u('name'): network_name,
- six.u('id'): network_id,
- six.u('driver'): 'bridge',
- six.u('containers'): {},
+ 'name': network_name,
+ 'id': network_id,
+ 'driver': 'bridge',
+ 'containers': {},
}
network_response = response(status_code=200, content=network_data)
@@ -123,7 +121,7 @@ class NetworkTest(BaseAPIClientTest):
assert result == network_data
args = get.call_args
- assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+ assert args[0][0] == url_prefix + f'networks/{network_id}'
def test_connect_container_to_network(self):
network_id = 'abc12345'
@@ -136,11 +134,12 @@ class NetworkTest(BaseAPIClientTest):
container={'Id': container_id},
net_id=network_id,
aliases=['foo', 'bar'],
- links=[('baz', 'quux')]
+ links=[('baz', 'quux')],
+ driver_opt={'com.docker-py.setting': 'yes'},
)
assert post.call_args[0][0] == (
- url_prefix + 'networks/{0}/connect'.format(network_id)
+ url_prefix + f'networks/{network_id}/connect'
)
assert json.loads(post.call_args[1]['data']) == {
@@ -148,6 +147,7 @@ class NetworkTest(BaseAPIClientTest):
'EndpointConfig': {
'Aliases': ['foo', 'bar'],
'Links': ['baz:quux'],
+ 'DriverOpts': {'com.docker-py.setting': 'yes'},
},
}
@@ -162,7 +162,7 @@ class NetworkTest(BaseAPIClientTest):
container={'Id': container_id}, net_id=network_id)
assert post.call_args[0][0] == (
- url_prefix + 'networks/{0}/disconnect'.format(network_id)
+ url_prefix + f'networks/{network_id}/disconnect'
)
assert json.loads(post.call_args[1]['data']) == {
'Container': container_id
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index f4d220a..dfc3816 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -1,30 +1,31 @@
import datetime
-import json
import io
+import json
import os
import re
import shutil
import socket
+import struct
import tempfile
import threading
import time
import unittest
+import socketserver
+import http.server
import docker
-from docker.api import APIClient
+import pytest
import requests
+from docker.api import APIClient
+from docker.constants import DEFAULT_DOCKER_API_VERSION
from requests.packages import urllib3
-import six
-import struct
from . import fake_api
-import pytest
-
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
@@ -34,7 +35,7 @@ def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
- if not isinstance(content, six.binary_type):
+ if not isinstance(content, bytes):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
@@ -60,7 +61,7 @@ def fake_resp(method, url, *args, **kwargs):
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
- raise Exception('{0} {1}'.format(method, url))
+ raise Exception(f'{method} {url}')
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
@@ -85,11 +86,11 @@ def fake_delete(self, url, *args, **kwargs):
def fake_read_from_socket(self, response, stream, tty=False, demux=False):
- return six.binary_type()
+ return bytes()
-url_base = '{0}/'.format(fake_api.prefix)
-url_prefix = '{0}v{1}/'.format(
+url_base = f'{fake_api.prefix}/'
+url_prefix = '{}v{}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
@@ -105,7 +106,7 @@ class BaseAPIClientTest(unittest.TestCase):
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
- self.client = APIClient()
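+        # pin the client to the default API version rather than letting
+        # it negotiate one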
+ self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
def tearDown(self):
self.client.close()
@@ -133,20 +134,20 @@ class DockerApiTest(BaseAPIClientTest):
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
- assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
+ assert url == '{}{}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
- assert url == '{0}{1}'.format(
+ assert url == '{}{}'.format(
url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
- assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
+ assert url == '{}{}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
- assert url == '{0}{1}'.format(
+ assert url == '{}{}'.format(
url_prefix, 'images/localhost:5000/image/push'
)
@@ -156,13 +157,13 @@ class DockerApiTest(BaseAPIClientTest):
def test_url_no_resource(self):
url = self.client._url('/simple')
- assert url == '{0}{1}'.format(url_prefix, 'simple')
+ assert url == '{}{}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
- assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
+ assert url == '{}{}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
@@ -184,13 +185,13 @@ class DockerApiTest(BaseAPIClientTest):
def test_retrieve_server_version(self):
client = APIClient(version="auto")
- assert isinstance(client._version, six.string_types)
+ assert isinstance(client._version, str)
assert not (client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
- assert isinstance(version, six.string_types)
+ assert isinstance(version, str)
def test_info(self):
self.client.info()
@@ -282,27 +283,37 @@ class DockerApiTest(BaseAPIClientTest):
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
- c = APIClient(base_url="unix://socket")
+ c = APIClient(
+ base_url="unix://socket",
+ version=DEFAULT_DOCKER_API_VERSION)
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
- c = APIClient(base_url="unix:///socket")
+ c = APIClient(
+ base_url="unix:///socket",
+ version=DEFAULT_DOCKER_API_VERSION)
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
- c = APIClient(base_url="http+unix:///socket")
+ c = APIClient(
+ base_url="http+unix:///socket",
+ version=DEFAULT_DOCKER_API_VERSION)
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
- c = APIClient(base_url="http://hostname:1234")
+ c = APIClient(
+ base_url="http://hostname:1234",
+ version=DEFAULT_DOCKER_API_VERSION)
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
- c = APIClient(base_url="tcp://hostname:1234")
+ c = APIClient(
+ base_url="tcp://hostname:1234",
+ version=DEFAULT_DOCKER_API_VERSION)
assert c.base_url == "http://hostname:1234"
@@ -327,8 +338,7 @@ class DockerApiTest(BaseAPIClientTest):
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
- if six.PY3:
- content_str = content_str.encode('utf-8')
+ content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
@@ -395,7 +405,7 @@ class UnixSocketStreamTest(unittest.TestCase):
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
- except socket.error:
+ except OSError:
# Probably no connection to accept yet
time.sleep(0.01)
continue
@@ -447,7 +457,9 @@ class UnixSocketStreamTest(unittest.TestCase):
b'\r\n'
) + b'\r\n'.join(lines)
- with APIClient(base_url="http+unix://" + self.socket_file) as client:
+ with APIClient(
+ base_url="http+unix://" + self.socket_file,
+ version=DEFAULT_DOCKER_API_VERSION) as client:
for i in range(5):
try:
stream = client.build(
@@ -477,7 +489,7 @@ class TCPSocketStreamTest(unittest.TestCase):
@classmethod
def setup_class(cls):
- cls.server = six.moves.socketserver.ThreadingTCPServer(
+ cls.server = socketserver.ThreadingTCPServer(
('', 0), cls.get_handler_class())
cls.thread = threading.Thread(target=cls.server.serve_forever)
cls.thread.setDaemon(True)
@@ -496,7 +508,7 @@ class TCPSocketStreamTest(unittest.TestCase):
stdout_data = cls.stdout_data
stderr_data = cls.stderr_data
- class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
+ class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
resp_data = self.get_resp_data()
self.send_response(101)
@@ -522,7 +534,7 @@ class TCPSocketStreamTest(unittest.TestCase):
data += stderr_data
return data
else:
- raise Exception('Unknown path {0}'.format(path))
+ raise Exception(f'Unknown path {path}')
@staticmethod
def frame_header(stream, data):
@@ -532,7 +544,10 @@ class TCPSocketStreamTest(unittest.TestCase):
def request(self, stream=None, tty=None, demux=None):
assert stream is not None and tty is not None and demux is not None
- with APIClient(base_url=self.address) as client:
+ with APIClient(
+ base_url=self.address,
+ version=DEFAULT_DOCKER_API_VERSION
+ ) as client:
if tty:
url = client._url('/tty')
else:
@@ -597,7 +612,7 @@ class UserAgentTest(unittest.TestCase):
self.patcher.stop()
def test_default_user_agent(self):
- client = APIClient()
+ client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
client.version()
assert self.mock_send.call_count == 1
@@ -606,7 +621,9 @@ class UserAgentTest(unittest.TestCase):
assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
- client = APIClient(user_agent='foo/bar')
+ client = APIClient(
+ user_agent='foo/bar',
+ version=DEFAULT_DOCKER_API_VERSION)
client.version()
assert self.mock_send.call_count == 1
@@ -615,7 +632,7 @@ class UserAgentTest(unittest.TestCase):
class DisableSocketTest(unittest.TestCase):
- class DummySocket(object):
+ class DummySocket:
def __init__(self, timeout=60):
self.timeout = timeout
@@ -626,7 +643,7 @@ class DisableSocketTest(unittest.TestCase):
return self.timeout
def setUp(self):
- self.client = APIClient()
+ self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
diff --git a/tests/unit/api_volume_test.py b/tests/unit/api_volume_test.py
index 7850c22..a8d9193 100644
--- a/tests/unit/api_volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -104,7 +104,7 @@ class VolumeTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][0] == 'GET'
- assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
+ assert args[0][1] == f'{url_prefix}volumes/{name}'
def test_remove_volume(self):
name = 'perfectcherryblossom'
@@ -112,4 +112,4 @@ class VolumeTest(BaseAPIClientTest):
args = fake_request.call_args
assert args[0][0] == 'DELETE'
- assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
+ assert args[0][1] == f'{url_prefix}volumes/{name}'
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index aac8910..8bd2e16 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import base64
import json
import os
@@ -15,7 +13,7 @@ import pytest
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
class RegressionTest(unittest.TestCase):
@@ -239,7 +237,7 @@ class LoadConfigTest(unittest.TestCase):
cfg_path = os.path.join(folder, '.dockercfg')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
with open(cfg_path, 'w') as f:
- f.write('auth = {0}\n'.format(auth_))
+ f.write(f'auth = {auth_}\n')
f.write('email = sakuya@scarlet.net')
cfg = auth.load_config(cfg_path)
@@ -297,13 +295,13 @@ class LoadConfigTest(unittest.TestCase):
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
- '.{0}.dockercfg'.format(
+ '.{}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -329,7 +327,7 @@ class LoadConfigTest(unittest.TestCase):
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -357,7 +355,7 @@ class LoadConfigTest(unittest.TestCase):
config = {
'auths': {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -386,7 +384,7 @@ class LoadConfigTest(unittest.TestCase):
config = {
'auths': {
registry: {
- 'auth': '{0}'.format(auth_),
+ 'auth': f'{auth_}',
'email': 'sakuya@scarlet.net'
}
}
@@ -794,9 +792,9 @@ class InMemoryStore(credentials.Store):
}
def list(self):
- return dict(
- [(k, v['Username']) for k, v in self.__store.items()]
- )
+ return {
+ k: v['Username'] for k, v in self.__store.items()
+ }
def erase(self, server):
del self.__store[server]
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index cce99c5..d647d3a 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -1,22 +1,24 @@
import datetime
+import os
+import unittest
+
import docker
-from docker.utils import kwargs_from_env
+import pytest
from docker.constants import (
- DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
+ DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_MAX_POOL_SIZE, IS_WINDOWS_PLATFORM
)
-import os
-import unittest
+from docker.utils import kwargs_from_env
from . import fake_api
-import pytest
-try:
-    from unittest import mock
-except ImportError:
-    import mock
-
+from unittest import mock
TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
+POOL_SIZE = 20
class ClientTest(unittest.TestCase):
@@ -25,33 +27,33 @@ class ClientTest(unittest.TestCase):
def test_events(self, mock_func):
since = datetime.datetime(2016, 1, 1, 0, 0)
mock_func.return_value = fake_api.get_fake_events()[1]
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.events(since=since) == mock_func.return_value
mock_func.assert_called_with(since=since)
@mock.patch('docker.api.APIClient.info')
def test_info(self, mock_func):
mock_func.return_value = fake_api.get_fake_info()[1]
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.info() == mock_func.return_value
mock_func.assert_called_with()
@mock.patch('docker.api.APIClient.ping')
def test_ping(self, mock_func):
mock_func.return_value = True
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.ping() is True
mock_func.assert_called_with()
@mock.patch('docker.api.APIClient.version')
def test_version(self, mock_func):
mock_func.return_value = fake_api.get_fake_version()[1]
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.version() == mock_func.return_value
mock_func.assert_called_with()
def test_call_api_client_method(self):
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
with pytest.raises(AttributeError) as cm:
client.create_container()
s = cm.exconly()
@@ -65,7 +67,9 @@ class ClientTest(unittest.TestCase):
assert "this method is now on the object APIClient" not in s
def test_call_containers(self):
- client = docker.DockerClient(**kwargs_from_env())
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION,
+ **kwargs_from_env())
with pytest.raises(TypeError) as cm:
client.containers()
@@ -74,6 +78,84 @@ class ClientTest(unittest.TestCase):
assert "'ContainerCollection' object is not callable" in s
assert "docker.APIClient" in s
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_default_pool_size_unix(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_default_pool_size_win(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_pool_size_unix(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_pool_size_win(self, mock_obj):
+ client = docker.DockerClient(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=POOL_SIZE
+ )
+
class FromEnvTest(unittest.TestCase):
@@ -90,7 +172,7 @@ class FromEnvTest(unittest.TestCase):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.api.base_url == "https://192.168.59.103:2376"
def test_from_env_with_version(self):
@@ -102,11 +184,85 @@ class FromEnvTest(unittest.TestCase):
assert client.api._version == '2.32'
def test_from_env_without_version_uses_default(self):
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.api._version == DEFAULT_DOCKER_API_VERSION
def test_from_env_without_timeout_uses_default(self):
- client = docker.from_env()
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_default_pool_size_from_env_unix(self, mock_obj):
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_default_pool_size_from_env_win(self, mock_obj):
+ client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION)
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=DEFAULT_MAX_POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux'
+ )
+ @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool")
+ def test_pool_size_from_env_unix(self, mock_obj):
+ client = docker.from_env(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ base_url = "{base_url}/v{version}/_ping".format(
+ base_url=client.api.base_url,
+ version=client.api._version
+ )
+
+ mock_obj.assert_called_once_with(base_url,
+ "/var/run/docker.sock",
+ 60,
+ maxsize=POOL_SIZE
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows'
+ )
+ @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool")
+ def test_pool_size_from_env_win(self, mock_obj):
+ client = docker.from_env(
+ version=DEFAULT_DOCKER_API_VERSION,
+ max_pool_size=POOL_SIZE
+ )
+ mock_obj.return_value.urlopen.return_value.status = 200
+ client.ping()
+
+ mock_obj.assert_called_once_with("//./pipe/docker_engine",
+ 60,
+ maxsize=POOL_SIZE
+ )
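
The new pool-size tests cover the max_pool_size keyword that DockerClient and
from_env now accept; it is forwarded to the transport's urllib3 connection pool
as maxsize, capping concurrent connections to the daemon. A usage sketch, with
an illustrative size:

    import docker

    # allow up to 20 concurrent connections instead of the default pool size
    client = docker.from_env(max_pool_size=20)
    client.ping()
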
diff --git a/tests/unit/context_test.py b/tests/unit/context_test.py
new file mode 100644
index 0000000..6d6d672
--- /dev/null
+++ b/tests/unit/context_test.py
@@ -0,0 +1,49 @@
+import unittest
+import docker
+import pytest
+from docker.constants import DEFAULT_UNIX_SOCKET
+from docker.constants import DEFAULT_NPIPE
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.context import ContextAPI, Context
+
+
+class BaseContextTest(unittest.TestCase):
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='Linux specific path check'
+ )
+ def test_url_compatibility_on_linux(self):
+ c = Context("test")
+ assert c.Host == DEFAULT_UNIX_SOCKET.strip("http+")
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Windows specific path check'
+ )
+ def test_url_compatibility_on_windows(self):
+ c = Context("test")
+ assert c.Host == DEFAULT_NPIPE
+
+ def test_fail_on_default_context_create(self):
+ with pytest.raises(docker.errors.ContextException):
+ ContextAPI.create_context("default")
+
+ def test_default_in_context_list(self):
+ found = False
+ ctx = ContextAPI.contexts()
+ for c in ctx:
+ if c.Name == "default":
+ found = True
+ assert found is True
+
+ def test_get_current_context(self):
+ assert ContextAPI.get_current_context().Name == "default"
+
+ def test_https_host(self):
+ c = Context("test", host="tcp://testdomain:8080", tls=True)
+ assert c.Host == "https://testdomain:8080"
+
+ def test_context_inspect_without_params(self):
+ ctx = ContextAPI.inspect_context()
+ assert ctx["Name"] == "default"
+ assert ctx["Metadata"]["StackOrchestrator"] == "swarm"
+ assert ctx["Endpoints"]["docker"]["Host"] in [
+ DEFAULT_NPIPE, DEFAULT_UNIX_SOCKET.strip("http+")]
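
These tests exercise the docker.context package introduced by this patch. A
minimal sketch of the surface they cover, with an illustrative host:

    from docker.context import Context, ContextAPI

    ctx = Context("staging", host="tcp://staging.example:2376", tls=True)
    assert ctx.Host == "https://staging.example:2376"  # tls=True upgrades the scheme

    # "default" unless another context has been selected
    assert ContextAPI.get_current_context().Name == "default"
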
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
index 0689d07..a0a171b 100644
--- a/tests/unit/dockertypes_test.py
+++ b/tests/unit/dockertypes_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import unittest
import pytest
@@ -15,7 +13,7 @@ from docker.types.services import convert_service_ports
-try:
-    from unittest import mock
-except: # noqa: E722
-    import mock
+from unittest import mock
def create_host_config(*args, **kwargs):
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
index 2134f86..f8c3a66 100644
--- a/tests/unit/errors_test.py
+++ b/tests/unit/errors_test.py
@@ -101,17 +101,17 @@ class APIErrorTest(unittest.TestCase):
assert err.is_error() is True
def test_create_error_from_exception(self):
- resp = requests.Response()
- resp.status_code = 500
- err = APIError('')
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('')
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
try:
- resp.raise_for_status()
- except requests.exceptions.HTTPError as e:
- try:
- create_api_error_from_http_exception(e)
- except APIError as e:
- err = e
- assert err.is_server_error() is True
+ create_api_error_from_http_exception(e)
+ except APIError as e:
+ err = e
+ assert err.is_server_error() is True
class ContainerErrorTest(unittest.TestCase):
@@ -126,7 +126,7 @@ class ContainerErrorTest(unittest.TestCase):
err = ContainerError(container, exit_status, command, image, stderr)
msg = ("Command '{}' in image '{}' returned non-zero exit status {}"
- ).format(command, image, exit_status, stderr)
+ ).format(command, image, exit_status)
assert str(err) == msg
def test_container_with_stderr(self):
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index e609b64..4c93329 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -1,7 +1,8 @@
-from . import fake_stat
from docker import constants
-CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
+from . import fake_stat
+
+CURRENT_VERSION = f'v{constants.DEFAULT_DOCKER_API_VERSION}'
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
@@ -16,6 +17,8 @@ FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
FAKE_NODE_ID = '24ifsmvkjbyhk'
+FAKE_SECRET_ID = 'epdyrw4tsi03xy3deu8g8ly6o'
+FAKE_SECRET_NAME = 'super_secret'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
@@ -511,102 +514,108 @@ def post_fake_network_disconnect():
return 200, None
+def post_fake_secret():
+ status_code = 200
+ response = {'ID': FAKE_SECRET_ID}
+ return status_code, response
+
+
# Maps real api url to fake response callback
prefix = 'http+docker://localhost'
if constants.IS_WINDOWS_PLATFORM:
prefix = 'http+docker://localnpipe'
fake_responses = {
- '{0}/version'.format(prefix):
+ f'{prefix}/version':
get_fake_version,
- '{1}/{0}/version'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/version':
get_fake_version,
- '{1}/{0}/info'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/info':
get_fake_info,
- '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/auth':
post_fake_auth,
- '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/_ping':
get_fake_ping,
- '{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/search':
get_fake_search,
- '{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/json':
get_fake_images,
- '{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/history':
get_fake_image_history,
- '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/create':
post_fake_import_image,
- '{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/json':
get_fake_containers,
- '{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start':
post_fake_start_container,
- '{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize':
post_fake_resize_container,
- '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json':
get_fake_inspect_container,
- '{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename':
post_fake_rename_container,
- '{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag':
post_fake_tag_image,
- '{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait':
get_fake_wait,
- '{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs':
get_fake_logs,
- '{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes':
get_fake_diff,
- '{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export':
get_fake_export,
- '{1}/{0}/containers/3cc2351ab11b/update'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update':
post_fake_update_container,
- '{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec':
post_fake_exec_create,
- '{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start':
post_fake_exec_start,
- '{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json':
get_fake_exec_inspect,
- '{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize':
post_fake_exec_resize,
- '{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats':
get_fake_stats,
- '{1}/{0}/containers/3cc2351ab11b/top'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top':
get_fake_top,
- '{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop':
post_fake_stop_container,
- '{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill':
post_fake_kill_container,
- '{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause':
post_fake_pause_container,
- '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause':
post_fake_unpause_container,
- '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart':
post_fake_restart_container,
- '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b':
delete_fake_remove_container,
- '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/create':
post_fake_image_create,
- '{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128':
delete_fake_remove_image,
- '{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get':
get_fake_get_image,
- '{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/load':
post_fake_load_image,
- '{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/json':
get_fake_inspect_image,
- '{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/insert':
get_fake_insert_image,
- '{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/images/test_image/push':
post_fake_push,
- '{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/commit':
post_fake_commit,
- '{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/containers/create':
post_fake_create_container,
- '{1}/{0}/build'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/build':
post_fake_build_container,
- '{1}/{0}/events'.format(CURRENT_VERSION, prefix):
+ f'{prefix}/{CURRENT_VERSION}/events':
get_fake_events,
- ('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'GET'):
+ (f'{prefix}/{CURRENT_VERSION}/volumes', 'GET'):
get_fake_volume_list,
- ('{1}/{0}/volumes/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ (f'{prefix}/{CURRENT_VERSION}/volumes/create', 'POST'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
@@ -620,11 +629,11 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_NODE_ID
), 'POST'):
post_fake_update_node,
- ('{1}/{0}/swarm/join'.format(CURRENT_VERSION, prefix), 'POST'):
+ (f'{prefix}/{CURRENT_VERSION}/swarm/join', 'POST'):
post_fake_join_swarm,
- ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
+ (f'{prefix}/{CURRENT_VERSION}/networks', 'GET'):
get_fake_network_list,
- ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ (f'{prefix}/{CURRENT_VERSION}/networks/create', 'POST'):
post_fake_network,
('{1}/{0}/networks/{2}'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
@@ -642,4 +651,6 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'POST'):
post_fake_network_disconnect,
+ f'{prefix}/{CURRENT_VERSION}/secrets/create':
+ post_fake_secret,
}
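
Note that fake_responses mixes two key shapes: a bare URL for endpoints where
the HTTP method is irrelevant, and a (url, method) tuple where it matters. A
plausible lookup, assuming the fake client prefers the more specific key:

    def resolve_fake(url, method):
        # a (url, method) entry wins over a bare-URL entry when both exist
        return fake_responses.get((url, method)) or fake_responses.get(url)
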
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
index 2147bfd..1663ef1 100644
--- a/tests/unit/fake_api_client.py
+++ b/tests/unit/fake_api_client.py
@@ -1,12 +1,13 @@
import copy
import docker
+from docker.constants import DEFAULT_DOCKER_API_VERSION
from . import fake_api
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
class CopyReturnMagicMock(mock.MagicMock):
@@ -14,7 +15,7 @@ class CopyReturnMagicMock(mock.MagicMock):
A MagicMock which deep copies every return value.
"""
def _mock_call(self, *args, **kwargs):
- ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
+ ret = super()._mock_call(*args, **kwargs)
if isinstance(ret, (dict, list)):
ret = copy.deepcopy(ret)
return ret
@@ -30,7 +31,7 @@ def make_fake_api_client(overrides=None):
if overrides is None:
overrides = {}
- api_client = docker.APIClient()
+ api_client = docker.APIClient(version=DEFAULT_DOCKER_API_VERSION)
mock_attrs = {
'build.return_value': fake_api.FAKE_IMAGE_ID,
'commit.return_value': fake_api.post_fake_commit()[1],
@@ -39,6 +40,7 @@ def make_fake_api_client(overrides=None):
fake_api.post_fake_create_container()[1],
'create_host_config.side_effect': api_client.create_host_config,
'create_network.return_value': fake_api.post_fake_network()[1],
+ 'create_secret.return_value': fake_api.post_fake_secret()[1],
'exec_create.return_value': fake_api.post_fake_exec_create()[1],
'exec_start.return_value': fake_api.post_fake_exec_start()[1],
'images.return_value': fake_api.get_fake_images()[1],
@@ -50,6 +52,7 @@ def make_fake_api_client(overrides=None):
'networks.return_value': fake_api.get_fake_network_list()[1],
'start.return_value': None,
'wait.return_value': {'StatusCode': 0},
+ 'version.return_value': fake_api.get_fake_version()
}
mock_attrs.update(overrides)
mock_client = CopyReturnMagicMock(**mock_attrs)
@@ -62,6 +65,6 @@ def make_fake_client(overrides=None):
"""
Returns a Client with a fake APIClient.
"""
- client = docker.DockerClient()
+ client = docker.DockerClient(version=DEFAULT_DOCKER_API_VERSION)
client.api = make_fake_api_client(overrides)
return client
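
make_fake_client is the foundation of the model tests that follow: a real
DockerClient whose api attribute is a CopyReturnMagicMock preloaded with the
canned responses above. Typical usage, mirroring those tests:

    client = make_fake_client()
    container = client.containers.get(fake_api.FAKE_CONTAINER_ID)
    client.api.inspect_container.assert_called_with(fake_api.FAKE_CONTAINER_ID)
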
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index da5f0ab..c7aa46b 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -233,7 +233,7 @@ class ContainerCollectionTest(unittest.TestCase):
assert container.id == FAKE_CONTAINER_ID
client.api.pull.assert_called_with(
- 'alpine', platform=None, tag=None, stream=True
+ 'alpine', platform=None, tag='latest', all_tags=False, stream=True
)
def test_run_with_error(self):
@@ -450,7 +450,7 @@ class ContainerTest(unittest.TestCase):
container = client.containers.get(FAKE_CONTAINER_ID)
container.get_archive('foo')
client.api.get_archive.assert_called_with(
- FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
+ FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE, False
)
def test_image(self):
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
index fd894ab..f3ca0be 100644
--- a/tests/unit/models_images_test.py
+++ b/tests/unit/models_images_test.py
@@ -44,9 +44,25 @@ class ImageCollectionTest(unittest.TestCase):
def test_pull(self):
client = make_fake_client()
- image = client.images.pull('test_image:latest')
+ image = client.images.pull('test_image:test')
client.api.pull.assert_called_with(
- 'test_image', tag='latest', stream=True
+ 'test_image', tag='test', all_tags=False, stream=True
+ )
+ client.api.inspect_image.assert_called_with('test_image:test')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_pull_tag_precedence(self):
+ client = make_fake_client()
+ image = client.images.pull('test_image:latest', tag='test')
+ client.api.pull.assert_called_with(
+ 'test_image', tag='test', all_tags=False, stream=True
+ )
+ client.api.inspect_image.assert_called_with('test_image:test')
+
+ image = client.images.pull('test_image')
+ client.api.pull.assert_called_with(
+ 'test_image', tag='latest', all_tags=False, stream=True
)
client.api.inspect_image.assert_called_with('test_image:latest')
assert isinstance(image, Image)
@@ -54,9 +70,9 @@ class ImageCollectionTest(unittest.TestCase):
def test_pull_multiple(self):
client = make_fake_client()
- images = client.images.pull('test_image')
+ images = client.images.pull('test_image', all_tags=True)
client.api.pull.assert_called_with(
- 'test_image', tag=None, stream=True
+ 'test_image', tag='latest', all_tags=True, stream=True
)
client.api.images.assert_called_with(
all=False, name='test_image', filters=None
@@ -96,6 +112,11 @@ class ImageCollectionTest(unittest.TestCase):
client.images.search('test')
client.api.search.assert_called_with('test')
+ def test_search_limit(self):
+ client = make_fake_client()
+ client.images.search('test', limit=5)
+ client.api.search.assert_called_with('test', limit=5)
+
class ImageTest(unittest.TestCase):
def test_short_id(self):
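
The updated assertions reflect the new pull semantics: a bare repository name
now implies tag='latest' instead of fetching every tag, and fetching all tags
is opt-in. With a fake client as above:

    image = client.images.pull('test_image')                  # implies tag='latest'
    images = client.images.pull('test_image', all_tags=True)  # returns a list of Images
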
diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py
index 5af24ee..11dea29 100644
--- a/tests/unit/models_resources_test.py
+++ b/tests/unit/models_resources_test.py
@@ -16,7 +16,7 @@ class ModelTest(unittest.TestCase):
def test_hash(self):
client = make_fake_client()
container1 = client.containers.get(FAKE_CONTAINER_ID)
- my_set = set([container1])
+ my_set = {container1}
assert len(my_set) == 1
container2 = client.containers.get(FAKE_CONTAINER_ID)
diff --git a/tests/unit/models_secrets_test.py b/tests/unit/models_secrets_test.py
new file mode 100644
index 0000000..1c261a8
--- /dev/null
+++ b/tests/unit/models_secrets_test.py
@@ -0,0 +1,11 @@
+import unittest
+
+from .fake_api_client import make_fake_client
+from .fake_api import FAKE_SECRET_NAME
+
+
+class CreateServiceTest(unittest.TestCase):
+ def test_secrets_repr(self):
+ client = make_fake_client()
+ secret = client.secrets.create(name="super_secret", data="secret")
+ assert secret.__repr__() == f"<Secret: '{FAKE_SECRET_NAME}'>"
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
index a4ac50c..b9192e4 100644
--- a/tests/unit/models_services_test.py
+++ b/tests/unit/models_services_test.py
@@ -28,6 +28,7 @@ class CreateServiceKwargsTest(unittest.TestCase):
'constraints': ['foo=bar'],
'preferences': ['bar=baz'],
'platforms': [('x86_64', 'linux')],
+ 'maxreplicas': 1
})
task_template = kwargs.pop('task_template')
@@ -39,21 +40,22 @@ class CreateServiceKwargsTest(unittest.TestCase):
'update_config': {'update': 'config'},
'endpoint_spec': {'blah': 'blah'},
}
- assert set(task_template.keys()) == set([
+ assert set(task_template.keys()) == {
'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
'LogDriver', 'Networks'
- ])
+ }
assert task_template['Placement'] == {
'Constraints': ['foo=bar'],
'Preferences': ['bar=baz'],
'Platforms': [{'Architecture': 'x86_64', 'OS': 'linux'}],
+ 'MaxReplicas': 1,
}
assert task_template['LogDriver'] == {
'Name': 'logdriver',
'Options': {'foo': 'bar'}
}
assert task_template['Networks'] == [{'Target': 'somenet'}]
- assert set(task_template['ContainerSpec'].keys()) == set([
+ assert set(task_template['ContainerSpec'].keys()) == {
'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
'Labels', 'Mounts', 'StopGracePeriod'
- ])
+ }
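
maxreplicas is the new service kwarg validated here; it ends up as MaxReplicas
under the task template's Placement. A sketch with illustrative image and value:

    client.services.create(
        image='busybox',
        command='sleep 30',
        maxreplicas=1,  # serialized as TaskTemplate.Placement.MaxReplicas
    )
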
diff --git a/tests/unit/sshadapter_test.py b/tests/unit/sshadapter_test.py
new file mode 100644
index 0000000..874239a
--- /dev/null
+++ b/tests/unit/sshadapter_test.py
@@ -0,0 +1,39 @@
+import unittest
+import docker
+from docker.transport.sshconn import SSHSocket
+
+
+class SSHAdapterTest(unittest.TestCase):
+ @staticmethod
+ def test_ssh_hostname_prefix_trim():
+ conn = docker.transport.SSHHTTPAdapter(
+ base_url="ssh://user@hostname:1234", shell_out=True)
+ assert conn.ssh_host == "user@hostname:1234"
+
+ @staticmethod
+ def test_ssh_parse_url():
+ c = SSHSocket(host="user@hostname:1234")
+ assert c.host == "hostname"
+ assert c.port == "1234"
+ assert c.user == "user"
+
+ @staticmethod
+ def test_ssh_parse_hostname_only():
+ c = SSHSocket(host="hostname")
+ assert c.host == "hostname"
+ assert c.port is None
+ assert c.user is None
+
+ @staticmethod
+ def test_ssh_parse_user_and_hostname():
+ c = SSHSocket(host="user@hostname")
+ assert c.host == "hostname"
+ assert c.port is None
+ assert c.user == "user"
+
+ @staticmethod
+ def test_ssh_parse_hostname_and_port():
+ c = SSHSocket(host="hostname:22")
+ assert c.host == "hostname"
+ assert c.port == "22"
+ assert c.user is None
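
Taken together, the parsing tests pin down SSHSocket's host grammar: an
optional user@ prefix, a hostname, and an optional :port suffix, with the port
kept as a string and absent parts left as None. For example:

    sock = SSHSocket(host="user@hostname:2222")
    # sock.user == "user", sock.host == "hostname", sock.port == "2222"
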
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
index 73b7336..41a87f2 100644
--- a/tests/unit/ssladapter_test.py
+++ b/tests/unit/ssladapter_test.py
@@ -32,30 +32,30 @@ class SSLAdapterTest(unittest.TestCase):
class MatchHostnameTest(unittest.TestCase):
cert = {
'issuer': (
- (('countryName', u'US'),),
- (('stateOrProvinceName', u'California'),),
- (('localityName', u'San Francisco'),),
- (('organizationName', u'Docker Inc'),),
- (('organizationalUnitName', u'Docker-Python'),),
- (('commonName', u'localhost'),),
- (('emailAddress', u'info@docker.com'),)
+ (('countryName', 'US'),),
+ (('stateOrProvinceName', 'California'),),
+ (('localityName', 'San Francisco'),),
+ (('organizationName', 'Docker Inc'),),
+ (('organizationalUnitName', 'Docker-Python'),),
+ (('commonName', 'localhost'),),
+ (('emailAddress', 'info@docker.com'),)
),
'notAfter': 'Mar 25 23:08:23 2030 GMT',
- 'notBefore': u'Mar 25 23:08:23 2016 GMT',
- 'serialNumber': u'BD5F894C839C548F',
+ 'notBefore': 'Mar 25 23:08:23 2016 GMT',
+ 'serialNumber': 'BD5F894C839C548F',
'subject': (
- (('countryName', u'US'),),
- (('stateOrProvinceName', u'California'),),
- (('localityName', u'San Francisco'),),
- (('organizationName', u'Docker Inc'),),
- (('organizationalUnitName', u'Docker-Python'),),
- (('commonName', u'localhost'),),
- (('emailAddress', u'info@docker.com'),)
+ (('countryName', 'US'),),
+ (('stateOrProvinceName', 'California'),),
+ (('localityName', 'San Francisco'),),
+ (('organizationName', 'Docker Inc'),),
+ (('organizationalUnitName', 'Docker-Python'),),
+ (('commonName', 'localhost'),),
+ (('emailAddress', 'info@docker.com'),)
),
'subjectAltName': (
- ('DNS', u'localhost'),
- ('DNS', u'*.gensokyo.jp'),
- ('IP Address', u'127.0.0.1'),
+ ('DNS', 'localhost'),
+ ('DNS', '*.gensokyo.jp'),
+ ('IP Address', '127.0.0.1'),
),
'version': 3
}
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
index 4385380..aee1b9e 100644
--- a/tests/unit/swarm_test.py
+++ b/tests/unit/swarm_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import json
from . import fake_api
diff --git a/tests/unit/utils_build_test.py b/tests/unit/utils_build_test.py
index 012f15b..9f18388 100644
--- a/tests/unit/utils_build_test.py
+++ b/tests/unit/utils_build_test.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
import os
import os.path
import shutil
@@ -82,7 +80,7 @@ class ExcludePathsTest(unittest.TestCase):
assert sorted(paths) == sorted(set(paths))
def test_wildcard_exclude(self):
- assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
+ assert self.exclude(['*']) == {'Dockerfile', '.dockerignore'}
def test_exclude_dockerfile_dockerignore(self):
"""
@@ -99,18 +97,18 @@ class ExcludePathsTest(unittest.TestCase):
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
- assert self.exclude(['*'], dockerfile='Dockerfile.alt') == set(
- ['Dockerfile.alt', '.dockerignore']
- )
+ assert self.exclude(['*'], dockerfile='Dockerfile.alt') == {
+ 'Dockerfile.alt', '.dockerignore'
+ }
assert self.exclude(
['*'], dockerfile='foo/Dockerfile3'
- ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+ ) == convert_paths({'foo/Dockerfile3', '.dockerignore'})
# https://github.com/docker/docker-py/issues/1956
assert self.exclude(
['*'], dockerfile='./foo/Dockerfile3'
- ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+ ) == convert_paths({'foo/Dockerfile3', '.dockerignore'})
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
@@ -119,56 +117,56 @@ class ExcludePathsTest(unittest.TestCase):
def test_single_filename(self):
assert self.exclude(['a.py']) == convert_paths(
- self.all_paths - set(['a.py'])
+ self.all_paths - {'a.py'}
)
def test_single_filename_leading_dot_slash(self):
assert self.exclude(['./a.py']) == convert_paths(
- self.all_paths - set(['a.py'])
+ self.all_paths - {'a.py'}
)
# As odd as it sounds, a filename pattern with a trailing slash on the
# end *will* result in that file being excluded.
def test_single_filename_trailing_slash(self):
assert self.exclude(['a.py/']) == convert_paths(
- self.all_paths - set(['a.py'])
+ self.all_paths - {'a.py'}
)
def test_wildcard_filename_start(self):
assert self.exclude(['*.py']) == convert_paths(
- self.all_paths - set(['a.py', 'b.py', 'cde.py'])
+ self.all_paths - {'a.py', 'b.py', 'cde.py'}
)
def test_wildcard_with_exception(self):
assert self.exclude(['*.py', '!b.py']) == convert_paths(
- self.all_paths - set(['a.py', 'cde.py'])
+ self.all_paths - {'a.py', 'cde.py'}
)
def test_wildcard_with_wildcard_exception(self):
assert self.exclude(['*.*', '!*.go']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
- ])
+ }
)
def test_wildcard_filename_end(self):
assert self.exclude(['a.*']) == convert_paths(
- self.all_paths - set(['a.py', 'a.go'])
+ self.all_paths - {'a.py', 'a.go'}
)
def test_question_mark(self):
assert self.exclude(['?.py']) == convert_paths(
- self.all_paths - set(['a.py', 'b.py'])
+ self.all_paths - {'a.py', 'b.py'}
)
def test_single_subdir_single_filename(self):
assert self.exclude(['foo/a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py'])
+ self.all_paths - {'foo/a.py'}
)
def test_single_subdir_single_filename_leading_slash(self):
assert self.exclude(['/foo/a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py'])
+ self.all_paths - {'foo/a.py'}
)
def test_exclude_include_absolute_path(self):
@@ -176,57 +174,57 @@ class ExcludePathsTest(unittest.TestCase):
assert exclude_paths(
base,
['/*', '!/*.py']
- ) == set(['a.py', 'b.py'])
+ ) == {'a.py', 'b.py'}
def test_single_subdir_with_path_traversal(self):
assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py'])
+ self.all_paths - {'foo/a.py'}
)
def test_single_subdir_wildcard_filename(self):
assert self.exclude(['foo/*.py']) == convert_paths(
- self.all_paths - set(['foo/a.py', 'foo/b.py'])
+ self.all_paths - {'foo/a.py', 'foo/b.py'}
)
def test_wildcard_subdir_single_filename(self):
assert self.exclude(['*/a.py']) == convert_paths(
- self.all_paths - set(['foo/a.py', 'bar/a.py'])
+ self.all_paths - {'foo/a.py', 'bar/a.py'}
)
def test_wildcard_subdir_wildcard_filename(self):
assert self.exclude(['*/*.py']) == convert_paths(
- self.all_paths - set(['foo/a.py', 'foo/b.py', 'bar/a.py'])
+ self.all_paths - {'foo/a.py', 'foo/b.py', 'bar/a.py'}
)
def test_directory(self):
assert self.exclude(['foo']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo', 'foo/a.py', 'foo/b.py', 'foo/bar', 'foo/bar/a.py',
'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_trailing_slash(self):
assert self.exclude(['foo']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_single_exception(self):
assert self.exclude(['foo', '!foo/bar/a.py']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/a.py', 'foo/b.py', 'foo', 'foo/bar',
'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_subdir_exception(self):
assert self.exclude(['foo', '!foo/bar']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
- ])
+ }
)
@pytest.mark.skipif(
@@ -234,21 +232,21 @@ class ExcludePathsTest(unittest.TestCase):
)
def test_directory_with_subdir_exception_win32_pathsep(self):
assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
- ])
+ }
)
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
- self.all_paths - set([
+ self.all_paths - {
'foo/bar', 'foo/bar/a.py', 'foo', 'foo/Dockerfile3'
- ])
+ }
)
def test_subdirectory(self):
assert self.exclude(['foo/bar']) == convert_paths(
- self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ self.all_paths - {'foo/bar', 'foo/bar/a.py'}
)
@pytest.mark.skipif(
@@ -256,33 +254,33 @@ class ExcludePathsTest(unittest.TestCase):
)
def test_subdirectory_win32_pathsep(self):
assert self.exclude(['foo\\bar']) == convert_paths(
- self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ self.all_paths - {'foo/bar', 'foo/bar/a.py'}
)
def test_double_wildcard(self):
assert self.exclude(['**/a.py']) == convert_paths(
- self.all_paths - set(
- ['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
- )
+ self.all_paths - {
+ 'a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py'
+ }
)
assert self.exclude(['foo/**/bar']) == convert_paths(
- self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ self.all_paths - {'foo/bar', 'foo/bar/a.py'}
)
def test_single_and_double_wildcard(self):
assert self.exclude(['**/target/*/*']) == convert_paths(
- self.all_paths - set(
- ['target/subdir/file.txt',
+ self.all_paths - {
+ 'target/subdir/file.txt',
'subdir/target/subdir/file.txt',
- 'subdir/subdir2/target/subdir/file.txt']
- )
+ 'subdir/subdir2/target/subdir/file.txt'
+ }
)
def test_trailing_double_wildcard(self):
assert self.exclude(['subdir/**']) == convert_paths(
- self.all_paths - set(
- ['subdir/file.txt',
+ self.all_paths - {
+ 'subdir/file.txt',
'subdir/target/file.txt',
'subdir/target/subdir/file.txt',
'subdir/subdir2/file.txt',
@@ -292,16 +290,16 @@ class ExcludePathsTest(unittest.TestCase):
'subdir/target/subdir',
'subdir/subdir2',
'subdir/subdir2/target',
- 'subdir/subdir2/target/subdir']
- )
+ 'subdir/subdir2/target/subdir'
+ }
)
def test_double_wildcard_with_exception(self):
assert self.exclude(['**', '!bar', '!foo/bar']) == convert_paths(
- set([
+ {
'foo/bar', 'foo/bar/a.py', 'bar', 'bar/a.py', 'Dockerfile',
'.dockerignore',
- ])
+ }
)
def test_include_wildcard(self):
@@ -324,7 +322,7 @@ class ExcludePathsTest(unittest.TestCase):
assert exclude_paths(
base,
['*.md', '!README*.md', 'README-secret.md']
- ) == set(['README.md', 'README-bis.md'])
+ ) == {'README.md', 'README-bis.md'}
def test_parent_directory(self):
base = make_tree(
@@ -335,12 +333,12 @@ class ExcludePathsTest(unittest.TestCase):
# Dockerignore reference stipulates that absolute paths are
# equivalent to relative paths, hence /../foo should be
# equivalent to ../foo. It also stipulates that paths are run
- # through Go's filepath.Clean, which explicitely "replace
+ # through Go's filepath.Clean, which explicitly "replace
# "/.." by "/" at the beginning of a path".
assert exclude_paths(
base,
['../a.py', '/../b.py']
- ) == set(['c.py'])
+ ) == {'c.py'}
class TarTest(unittest.TestCase):
@@ -374,14 +372,14 @@ class TarTest(unittest.TestCase):
'.dockerignore',
]
- expected_names = set([
+ expected_names = {
'Dockerfile',
'.dockerignore',
'a.go',
'b.py',
'bar',
'bar/a.py',
- ])
+ }
base = make_tree(dirs, files)
self.addCleanup(shutil.rmtree, base)
@@ -413,7 +411,7 @@ class TarTest(unittest.TestCase):
with pytest.raises(IOError) as ei:
tar(base)
- assert 'Can not read file in context: {}'.format(full_path) in (
+ assert f'Can not read file in context: {full_path}' in (
ei.exconly()
)
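
The set-literal churn above centres on exclude_paths, which takes a
build-context directory and .dockerignore-style patterns and returns the set of
paths to keep. A sketch with an illustrative directory:

    from docker.utils.build import exclude_paths

    # everything except *.py files, with b.py re-included by the ! exception
    kept = exclude_paths('/path/to/context', ['*.py', '!b.py'])
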
diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py
index b0934f9..83e04a1 100644
--- a/tests/unit/utils_config_test.py
+++ b/tests/unit/utils_config_test.py
@@ -11,7 +11,7 @@ from docker.utils import config
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+from unittest import mock
class FindConfigFileTest(unittest.TestCase):
diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py
index f7aefd0..821ebe4 100644
--- a/tests/unit/utils_json_stream_test.py
+++ b/tests/unit/utils_json_stream_test.py
@@ -1,11 +1,7 @@
-# encoding: utf-8
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
-class TestJsonSplitter(object):
+class TestJsonSplitter:
def test_json_splitter_no_object(self):
data = '{"foo": "bar'
@@ -20,7 +16,7 @@ class TestJsonSplitter(object):
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
-class TestStreamAsText(object):
+class TestStreamAsText:
def test_stream_with_non_utf_unicode_character(self):
stream = [b'\xed\xf3\xf3']
@@ -28,12 +24,12 @@ class TestStreamAsText(object):
assert output == '���'
def test_stream_with_utf_character(self):
- stream = ['ěĝ'.encode('utf-8')]
+ stream = ['ěĝ'.encode()]
output, = stream_as_text(stream)
assert output == 'ěĝ'
-class TestJsonStream(object):
+class TestJsonStream:
def test_with_falsy_entries(self):
stream = [
diff --git a/tests/unit/utils_proxy_test.py b/tests/unit/utils_proxy_test.py
index ff0e14b..2da6040 100644
--- a/tests/unit/utils_proxy_test.py
+++ b/tests/unit/utils_proxy_test.py
@@ -1,7 +1,4 @@
-# -*- coding: utf-8 -*-
-
import unittest
-import six
from docker.utils.proxy import ProxyConfig
@@ -65,7 +62,7 @@ class ProxyConfigTest(unittest.TestCase):
# Proxy config is non null, env is None.
self.assertSetEqual(
set(CONFIG.inject_proxy_environment(None)),
- set(['{}={}'.format(k, v) for k, v in six.iteritems(ENV)]))
+ {f'{k}={v}' for k, v in ENV.items()})
# Proxy config is null, env is None.
self.assertIsNone(ProxyConfig().inject_proxy_environment(None), None)
@@ -74,7 +71,7 @@ class ProxyConfigTest(unittest.TestCase):
# Proxy config is non null, env is non null
actual = CONFIG.inject_proxy_environment(env)
- expected = ['{}={}'.format(k, v) for k, v in six.iteritems(ENV)] + env
+ expected = [f'{k}={v}' for k, v in ENV.items()] + env
# It's important that the first 8 variables are the ones from the proxy
# config, and the last 2 are the ones from the input environment
self.assertSetEqual(set(actual[:8]), set(expected[:8]))
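
inject_proxy_environment prepends the proxy variables to whatever environment
it is given, which is why the test checks the first eight entries separately
from the rest. A sketch with illustrative values:

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig(http='http://proxy.example:3128')
    env = cfg.inject_proxy_environment(['FOO=bar'])
    # proxy variables (upper- and lower-case forms) first, then FOO=bar
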
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index d9cb002..802d919 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -1,31 +1,22 @@
-# -*- coding: utf-8 -*-
-
import base64
import json
import os
import os.path
import shutil
-import sys
import tempfile
import unittest
-
+import pytest
from docker.api.client import APIClient
-from docker.constants import IS_WINDOWS_PLATFORM
+from docker.constants import IS_WINDOWS_PLATFORM, DEFAULT_DOCKER_API_VERSION
from docker.errors import DockerException
-from docker.utils import (
- convert_filters, convert_volume_binds, decode_json_header, kwargs_from_env,
- parse_bytes, parse_devices, parse_env_file, parse_host,
- parse_repository_tag, split_command, update_headers,
-)
-
+from docker.utils import (convert_filters, convert_volume_binds,
+ decode_json_header, kwargs_from_env, parse_bytes,
+ parse_devices, parse_env_file, parse_host,
+ parse_repository_tag, split_command, update_headers)
from docker.utils.ports import build_port_bindings, split_port
from docker.utils.utils import format_environment
-import pytest
-
-import six
-
TEST_CERT_DIR = os.path.join(
os.path.dirname(__file__),
'testdata/certs',
@@ -41,7 +32,7 @@ class DecoratorsTest(unittest.TestCase):
def f(self, headers=None):
return headers
- client = APIClient()
+ client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
client._general_configs = {}
g = update_headers(f)
@@ -92,6 +83,7 @@ class KwargsFromEnvTest(unittest.TestCase):
assert kwargs['tls'].verify
parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
+ kwargs['version'] = DEFAULT_DOCKER_API_VERSION
try:
client = APIClient(**kwargs)
assert parsed_host == client.base_url
@@ -112,6 +104,7 @@ class KwargsFromEnvTest(unittest.TestCase):
assert kwargs['tls'].assert_hostname is True
assert kwargs['tls'].verify is False
parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
+ kwargs['version'] = DEFAULT_DOCKER_API_VERSION
try:
client = APIClient(**kwargs)
assert parsed_host == client.base_url
@@ -199,22 +192,22 @@ class ConverVolumeBindsTest(unittest.TestCase):
assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
def test_convert_volume_binds_unicode_bytes_input(self):
- expected = [u'/mnt/지연:/unicode/박:rw']
+ expected = ['/mnt/지연:/unicode/박:rw']
data = {
- u'/mnt/지연'.encode('utf-8'): {
- 'bind': u'/unicode/박'.encode('utf-8'),
+ '/mnt/지연'.encode(): {
+ 'bind': '/unicode/박'.encode(),
'mode': 'rw'
}
}
assert convert_volume_binds(data) == expected
def test_convert_volume_binds_unicode_unicode_input(self):
- expected = [u'/mnt/지연:/unicode/박:rw']
+ expected = ['/mnt/지연:/unicode/박:rw']
data = {
- u'/mnt/지연': {
- 'bind': u'/unicode/박',
+ '/mnt/지연': {
+ 'bind': '/unicode/박',
'mode': 'rw'
}
}
@@ -363,14 +356,14 @@ class ParseRepositoryTagTest(unittest.TestCase):
)
def test_index_image_sha(self):
- assert parse_repository_tag("root@sha256:{0}".format(self.sha)) == (
- "root", "sha256:{0}".format(self.sha)
+ assert parse_repository_tag(f"root@sha256:{self.sha}") == (
+ "root", f"sha256:{self.sha}"
)
def test_private_reg_image_sha(self):
assert parse_repository_tag(
- "url:5000/repo@sha256:{0}".format(self.sha)
- ) == ("url:5000/repo", "sha256:{0}".format(self.sha))
+ f"url:5000/repo@sha256:{self.sha}"
+ ) == ("url:5000/repo", f"sha256:{self.sha}")
class ParseDeviceTest(unittest.TestCase):
@@ -447,11 +440,7 @@ class ParseBytesTest(unittest.TestCase):
parse_bytes("127.0.0.1K")
def test_parse_bytes_float(self):
- with pytest.raises(DockerException):
- parse_bytes("1.5k")
-
- def test_parse_bytes_maxint(self):
- assert parse_bytes("{0}k".format(sys.maxsize)) == sys.maxsize * 1024
+ assert parse_bytes("1.5k") == 1536
class UtilsTest(unittest.TestCase):
@@ -471,20 +460,13 @@ class UtilsTest(unittest.TestCase):
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
-        data = None
-        if six.PY3:
-            data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
-        else:
-            data = base64.urlsafe_b64encode(json.dumps(obj))
+        data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
decoded_data = decode_json_header(data)
assert obj == decoded_data
class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
- assert split_command(u'echo μμ') == ['echo', 'μμ']
-
- @pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
- def test_split_command_with_bytes(self):
assert split_command('echo μμ') == ['echo', 'μμ']
@@ -549,6 +531,12 @@ class PortsTest(unittest.TestCase):
assert internal_port == ["2000"]
assert external_port == [("2001:abcd:ef00::2", "1000")]
+ def test_split_port_with_ipv6_square_brackets_address(self):
+ internal_port, external_port = split_port(
+ "[2001:abcd:ef00::2]:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
+
def test_split_port_invalid(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000:2000:tcp")
@@ -628,7 +616,7 @@ class FormatEnvironmentTest(unittest.TestCase):
env_dict = {
'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'
}
- assert format_environment(env_dict) == [u'ARTIST_NAME=송지은']
+ assert format_environment(env_dict) == ['ARTIST_NAME=송지은']
def test_format_env_no_value(self):
env_dict = {