author    Jason Pleau <jason@jpleau.ca>  2018-03-30 15:31:04 -0400
committer Jason Pleau <jason@jpleau.ca>  2018-03-30 15:31:04 -0400
commit    fa27a6cfe201f8d4241fff59aaa0867cb238122f (patch)
tree      39cb120dc1156f582169450814904ffa716a4f32
parent    7c25f0634f5ff158a492426fa6238e4e6351008b (diff)
New upstream version 3.2.1
-rw-r--r--  MANIFEST.in  1
-rw-r--r--  PKG-INFO  112
-rw-r--r--  README.md  83
-rw-r--r--  README.rst  97
-rw-r--r--  docker.egg-info/PKG-INFO  119
-rw-r--r--  docker.egg-info/SOURCES.txt  128
-rw-r--r--  docker.egg-info/dependency_links.txt (renamed from docker_py.egg-info/dependency_links.txt)  0
-rw-r--r--  docker.egg-info/not-zip-safe (renamed from docker_py.egg-info/not-zip-safe)  0
-rw-r--r--  docker.egg-info/requires.txt  21
-rw-r--r--  docker.egg-info/top_level.txt (renamed from docker_py.egg-info/top_level.txt)  0
-rw-r--r--  docker/__init__.py  7
-rw-r--r--  docker/api/__init__.py  10
-rw-r--r--  docker/api/build.py  259
-rw-r--r--  docker/api/client.py  456
-rw-r--r--  docker/api/config.py  91
-rw-r--r--  docker/api/container.py  1139
-rw-r--r--  docker/api/daemon.py  135
-rw-r--r--  docker/api/exec_api.py  118
-rw-r--r--  docker/api/image.py  432
-rw-r--r--  docker/api/network.py  203
-rw-r--r--  docker/api/plugin.py  251
-rw-r--r--  docker/api/secret.py  102
-rw-r--r--  docker/api/service.py  397
-rw-r--r--  docker/api/swarm.py  319
-rw-r--r--  docker/api/volume.py  129
-rw-r--r--  docker/auth.py (renamed from docker/auth/auth.py)  143
-rw-r--r--  docker/auth/__init__.py  8
-rw-r--r--  docker/client.py  591
-rw-r--r--  docker/constants.py  6
-rw-r--r--  docker/errors.py  107
-rw-r--r--  docker/models/__init__.py  0
-rw-r--r--  docker/models/configs.py  69
-rw-r--r--  docker/models/containers.py  1056
-rw-r--r--  docker/models/images.py  443
-rw-r--r--  docker/models/networks.py  215
-rw-r--r--  docker/models/nodes.py  107
-rw-r--r--  docker/models/plugins.py  200
-rw-r--r--  docker/models/resource.py  93
-rw-r--r--  docker/models/secrets.py  69
-rw-r--r--  docker/models/services.py  352
-rw-r--r--  docker/models/swarm.py  168
-rw-r--r--  docker/models/volumes.py  99
-rw-r--r--  docker/ssladapter/__init__.py  1
-rw-r--r--  docker/tls.py  53
-rw-r--r--  docker/transport/__init__.py  3
-rw-r--r--  docker/transport/npipeconn.py  11
-rw-r--r--  docker/transport/npipesocket.py  1
-rw-r--r--  docker/transport/ssladapter.py (renamed from docker/ssladapter/ssladapter.py)  5
-rw-r--r--  docker/transport/unixconn.py  33
-rw-r--r--  docker/types/__init__.py  10
-rw-r--r--  docker/types/containers.py  506
-rw-r--r--  docker/types/daemon.py  62
-rw-r--r--  docker/types/healthcheck.py  88
-rw-r--r--  docker/types/networks.py  111
-rw-r--r--  docker/types/services.py  600
-rw-r--r--  docker/types/swarm.py  99
-rw-r--r--  docker/utils/__init__.py  14
-rw-r--r--  docker/utils/build.py  219
-rw-r--r--  docker/utils/config.py  66
-rw-r--r--  docker/utils/decorators.py  37
-rw-r--r--  docker/utils/fnmatch.py  114
-rw-r--r--  docker/utils/json_stream.py  80
-rw-r--r--  docker/utils/ports.py  83
-rw-r--r--  docker/utils/ports/__init__.py  4
-rw-r--r--  docker/utils/ports/ports.py  92
-rw-r--r--  docker/utils/socket.py  34
-rw-r--r--  docker/utils/types.py  7
-rw-r--r--  docker/utils/utils.py  736
-rw-r--r--  docker/version.py  2
-rw-r--r--  docker_py.egg-info/PKG-INFO  61
-rw-r--r--  docker_py.egg-info/SOURCES.txt  82
-rw-r--r--  docker_py.egg-info/requires.txt  10
-rw-r--r--  requirements.txt  24
-rw-r--r--  setup.cfg  2
-rw-r--r--  setup.py  57
-rw-r--r--  test-requirements.txt  5
-rw-r--r--  tests/base.py  48
-rw-r--r--  tests/helpers.py  181
-rw-r--r--  tests/integration/api_build_test.py  474
-rw-r--r--  tests/integration/api_client_test.py  117
-rw-r--r--  tests/integration/api_config_test.py  72
-rw-r--r--  tests/integration/api_container_test.py (renamed from tests/integration/container_test.py)  951
-rw-r--r--  tests/integration/api_exec_test.py  205
-rw-r--r--  tests/integration/api_healthcheck_test.py  68
-rw-r--r--  tests/integration/api_image_test.py (renamed from tests/integration/image_test.py)  163
-rw-r--r--  tests/integration/api_network_test.py (renamed from tests/integration/network_test.py)  229
-rw-r--r--  tests/integration/api_plugin_test.py  145
-rw-r--r--  tests/integration/api_secret_test.py  72
-rw-r--r--  tests/integration/api_service_test.py  1255
-rw-r--r--  tests/integration/api_swarm_test.py  207
-rw-r--r--  tests/integration/api_test.py  176
-rw-r--r--  tests/integration/api_volume_test.py (renamed from tests/integration/volume_test.py)  36
-rw-r--r--  tests/integration/base.py  125
-rw-r--r--  tests/integration/build_test.py  164
-rw-r--r--  tests/integration/client_test.py  49
-rw-r--r--  tests/integration/conftest.py  6
-rw-r--r--  tests/integration/errors_test.py  15
-rw-r--r--  tests/integration/exec_test.py  110
-rw-r--r--  tests/integration/models_containers_test.py  361
-rw-r--r--  tests/integration/models_images_test.py  136
-rw-r--r--  tests/integration/models_networks_test.py  70
-rw-r--r--  tests/integration/models_nodes_test.py  37
-rw-r--r--  tests/integration/models_resources_test.py  16
-rw-r--r--  tests/integration/models_services_test.py  335
-rw-r--r--  tests/integration/models_swarm_test.py  33
-rw-r--r--  tests/integration/models_volumes_test.py  30
-rw-r--r--  tests/integration/regression_test.py  36
-rw-r--r--  tests/integration/service_test.py  189
-rw-r--r--  tests/integration/swarm_test.py  145
-rw-r--r--  tests/integration/testdata/dummy-plugin/config.json  19
-rw-r--r--  tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt  0
-rw-r--r--  tests/unit/api_build_test.py (renamed from tests/unit/build_test.py)  81
-rw-r--r--  tests/unit/api_container_test.py (renamed from tests/unit/container_test.py)  923
-rw-r--r--  tests/unit/api_exec_test.py  83
-rw-r--r--  tests/unit/api_image_test.py (renamed from tests/unit/image_test.py)  82
-rw-r--r--  tests/unit/api_network_test.py  169
-rw-r--r--  tests/unit/api_test.py  194
-rw-r--r--  tests/unit/api_volume_test.py (renamed from tests/unit/volume_test.py)  83
-rw-r--r--  tests/unit/auth_test.py  405
-rw-r--r--  tests/unit/client_test.py  138
-rw-r--r--  tests/unit/dockertypes_test.py  470
-rw-r--r--  tests/unit/errors_test.py  133
-rw-r--r--  tests/unit/exec_test.py  103
-rw-r--r--  tests/unit/fake_api.py  181
-rw-r--r--  tests/unit/fake_api_client.py  61
-rw-r--r--  tests/unit/models_containers_test.py  538
-rw-r--r--  tests/unit/models_images_test.py  128
-rw-r--r--  tests/unit/models_networks_test.py  64
-rw-r--r--  tests/unit/models_resources_test.py  28
-rw-r--r--  tests/unit/models_services_test.py  53
-rw-r--r--  tests/unit/network_test.py  187
-rw-r--r--  tests/unit/ssladapter_test.py  23
-rw-r--r--  tests/unit/swarm_test.py  71
-rw-r--r--  tests/unit/utils_config_test.py  123
-rw-r--r--  tests/unit/utils_json_stream_test.py  62
-rw-r--r--  tests/unit/utils_test.py  787
136 files changed, 17949 insertions, 5143 deletions
diff --git a/MANIFEST.in b/MANIFEST.in
index ee6cdbb..41b3fa9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,3 +5,4 @@ include README.rst
include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
+recursive-include tests/integration/testdata *
diff --git a/PKG-INFO b/PKG-INFO
index cc96266..a02d191 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,61 +1,119 @@
Metadata-Version: 1.1
-Name: docker-py
-Version: 1.10.6
-Summary: Python client for Docker.
-Home-page: https://github.com/docker/docker-py/
+Name: docker
+Version: 3.2.1
+Summary: A Python library for the Docker Engine API.
+Home-page: https://github.com/docker/docker-py
Author: Joffrey F
Author-email: joffrey@docker.com
-License: UNKNOWN
-Description: docker-py
- =========
+License: Apache License 2.0
+Description-Content-Type: UNKNOWN
+Description: Docker SDK for Python
+ =====================
|Build Status|
- A Python library for the Docker Remote API. It does everything the
- ``docker`` command does, but from within Python – run containers, manage
- them, pull/push images, etc.
+ A Python library for the Docker Engine API. It lets you do anything the
+ ``docker`` command does, but from within Python apps – run containers,
+ manage containers, manage Swarms, etc.
Installation
------------
- The latest stable version is always available on PyPi.
+ The latest stable version `is available on
+ PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+ your ``requirements.txt`` file or install with pip:
::
- pip install docker-py
+ pip install docker
- Documentation
- -------------
+ If you are intending to connect to a docker host via TLS, add
+ ``docker[tls]`` to your requirements instead, or install with pip:
- |Documentation Status|
+ ::
+
+ pip install docker[tls]
+
+ Usage
+ -----
+
+ Connect to Docker using the default socket or the configuration in your
+ environment:
+
+ .. code:: python
+
+ import docker
+ client = docker.from_env()
+
+ You can run containers:
+
+ .. code:: python
+
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+
+ You can run containers in the background:
+
+ .. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+ You can manage containers:
+
+ .. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+ You can stream logs:
+
+ .. code:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+ You can manage images:
+
+ .. code:: python
- `Read the full documentation
- here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
- available in the ``docs/`` directory.
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
- License
- -------
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
- Docker is licensed under the Apache License, Version 2.0. See LICENSE
- for full license text
+ `Read the full documentation <https://docker-py.readthedocs.io>`__ to
+ see everything you can do.
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
+ .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
- .. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
- :target: https://readthedocs.org/projects/docker-py/?badge=latest
Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
diff --git a/README.md b/README.md
index 876ed02..3ff124d 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,77 @@
-docker-py
-=========
+# Docker SDK for Python
-[![Build Status](https://travis-ci.org/docker/docker-py.png)](https://travis-ci.org/docker/docker-py)
+[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
-A Python library for the Docker Remote API. It does everything the `docker` command does, but from within Python – run containers, manage them, pull/push images, etc.
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
-Installation
-------------
+## Installation
-The latest stable version is always available on PyPi.
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
- pip install docker-py
+ pip install docker
-Documentation
--------------
+If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
-[![Documentation Status](https://readthedocs.org/projects/docker-py/badge/?version=latest)](https://readthedocs.org/projects/docker-py/?badge=latest)
+ pip install docker[tls]
-[Read the full documentation here](https://docker-py.readthedocs.io/en/latest/).
-The source is available in the `docs/` directory.
+## Usage
+Connect to Docker using the default socket or the configuration in your environment:
-License
--------
-Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu:latest", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+... print line.strip()
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
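The README above mentions the `docker[tls]` extra but stops short of showing a TLS connection from Python. A minimal sketch, assuming the daemon listens with TLS enabled; the host address and certificate paths are placeholders:

```python
import docker
from docker.tls import TLSConfig

# Placeholder paths -- substitute certificates generated for your daemon.
tls_config = TLSConfig(
    client_cert=('/path/to/cert.pem', '/path/to/key.pem'),
    ca_cert='/path/to/ca.pem',
    verify=True,
)
client = docker.DockerClient(base_url='tcp://192.0.2.10:2376', tls=tls_config)
print(client.ping())  # True if the TLS handshake and API call succeed
```

Alternatively, `docker.from_env()` picks up the same settings from the `DOCKER_HOST`, `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` environment variables.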
diff --git a/README.rst b/README.rst
index 757b82c..d0117e6 100644
--- a/README.rst
+++ b/README.rst
@@ -1,37 +1,94 @@
-docker-py
-=========
+Docker SDK for Python
+=====================
|Build Status|
-A Python library for the Docker Remote API. It does everything the
-``docker`` command does, but from within Python – run containers, manage
-them, pull/push images, etc.
+A Python library for the Docker Engine API. It lets you do anything the
+``docker`` command does, but from within Python apps – run containers,
+manage containers, manage Swarms, etc.
Installation
------------
-The latest stable version is always available on PyPi.
+The latest stable version `is available on
+PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+your ``requirements.txt`` file or install with pip:
::
- pip install docker-py
+ pip install docker
-Documentation
--------------
+If you are intending to connect to a docker host via TLS, add
+``docker[tls]`` to your requirements instead, or install with pip:
-|Documentation Status|
+::
+
+ pip install docker[tls]
+
+Usage
+-----
+
+Connect to Docker using the default socket or the configuration in your
+environment:
+
+.. code:: python
+
+ import docker
+ client = docker.from_env()
+
+You can run containers:
+
+.. code:: python
+
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+
+You can run containers in the background:
+
+.. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+You can manage containers:
+
+.. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+You can stream logs:
+
+.. code:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+You can manage images:
+
+.. code:: python
-`Read the full documentation
-here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
-available in the ``docs/`` directory.
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
-License
--------
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
-Docker is licensed under the Apache License, Version 2.0. See LICENSE
-for full license text
+`Read the full documentation <https://docker-py.readthedocs.io>`__ to
+see everything you can do.
-.. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
+.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
-.. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
- :target: https://readthedocs.org/projects/docker-py/?badge=latest
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
new file mode 100644
index 0000000..a02d191
--- /dev/null
+++ b/docker.egg-info/PKG-INFO
@@ -0,0 +1,119 @@
+Metadata-Version: 1.1
+Name: docker
+Version: 3.2.1
+Summary: A Python library for the Docker Engine API.
+Home-page: https://github.com/docker/docker-py
+Author: Joffrey F
+Author-email: joffrey@docker.com
+License: Apache License 2.0
+Description-Content-Type: UNKNOWN
+Description: Docker SDK for Python
+ =====================
+
+ |Build Status|
+
+ A Python library for the Docker Engine API. It lets you do anything the
+ ``docker`` command does, but from within Python apps – run containers,
+ manage containers, manage Swarms, etc.
+
+ Installation
+ ------------
+
+ The latest stable version `is available on
+ PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+ your ``requirements.txt`` file or install with pip:
+
+ ::
+
+ pip install docker
+
+ If you are intending to connect to a docker host via TLS, add
+ ``docker[tls]`` to your requirements instead, or install with pip:
+
+ ::
+
+ pip install docker[tls]
+
+ Usage
+ -----
+
+ Connect to Docker using the default socket or the configuration in your
+ environment:
+
+ .. code:: python
+
+ import docker
+ client = docker.from_env()
+
+ You can run containers:
+
+ .. code:: python
+
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+
+ You can run containers in the background:
+
+ .. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+ You can manage containers:
+
+ .. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+ You can stream logs:
+
+ .. code:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+ You can manage images:
+
+ .. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+ `Read the full documentation <https://docker-py.readthedocs.io>`__ to
+ see everything you can do.
+
+ .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
+ :target: https://travis-ci.org/docker/docker-py
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Utilities
+Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
new file mode 100644
index 0000000..9a773e6
--- /dev/null
+++ b/docker.egg-info/SOURCES.txt
@@ -0,0 +1,128 @@
+LICENSE
+MANIFEST.in
+README.md
+README.rst
+requirements.txt
+setup.cfg
+setup.py
+test-requirements.txt
+docker/__init__.py
+docker/auth.py
+docker/client.py
+docker/constants.py
+docker/errors.py
+docker/tls.py
+docker/version.py
+docker.egg-info/PKG-INFO
+docker.egg-info/SOURCES.txt
+docker.egg-info/dependency_links.txt
+docker.egg-info/not-zip-safe
+docker.egg-info/requires.txt
+docker.egg-info/top_level.txt
+docker/api/__init__.py
+docker/api/build.py
+docker/api/client.py
+docker/api/config.py
+docker/api/container.py
+docker/api/daemon.py
+docker/api/exec_api.py
+docker/api/image.py
+docker/api/network.py
+docker/api/plugin.py
+docker/api/secret.py
+docker/api/service.py
+docker/api/swarm.py
+docker/api/volume.py
+docker/models/__init__.py
+docker/models/configs.py
+docker/models/containers.py
+docker/models/images.py
+docker/models/networks.py
+docker/models/nodes.py
+docker/models/plugins.py
+docker/models/resource.py
+docker/models/secrets.py
+docker/models/services.py
+docker/models/swarm.py
+docker/models/volumes.py
+docker/transport/__init__.py
+docker/transport/npipeconn.py
+docker/transport/npipesocket.py
+docker/transport/ssladapter.py
+docker/transport/unixconn.py
+docker/types/__init__.py
+docker/types/base.py
+docker/types/containers.py
+docker/types/daemon.py
+docker/types/healthcheck.py
+docker/types/networks.py
+docker/types/services.py
+docker/types/swarm.py
+docker/utils/__init__.py
+docker/utils/build.py
+docker/utils/config.py
+docker/utils/decorators.py
+docker/utils/fnmatch.py
+docker/utils/json_stream.py
+docker/utils/ports.py
+docker/utils/socket.py
+docker/utils/utils.py
+tests/__init__.py
+tests/helpers.py
+tests/integration/__init__.py
+tests/integration/api_build_test.py
+tests/integration/api_client_test.py
+tests/integration/api_config_test.py
+tests/integration/api_container_test.py
+tests/integration/api_exec_test.py
+tests/integration/api_healthcheck_test.py
+tests/integration/api_image_test.py
+tests/integration/api_network_test.py
+tests/integration/api_plugin_test.py
+tests/integration/api_secret_test.py
+tests/integration/api_service_test.py
+tests/integration/api_swarm_test.py
+tests/integration/api_volume_test.py
+tests/integration/base.py
+tests/integration/client_test.py
+tests/integration/conftest.py
+tests/integration/errors_test.py
+tests/integration/models_containers_test.py
+tests/integration/models_images_test.py
+tests/integration/models_networks_test.py
+tests/integration/models_nodes_test.py
+tests/integration/models_resources_test.py
+tests/integration/models_services_test.py
+tests/integration/models_swarm_test.py
+tests/integration/models_volumes_test.py
+tests/integration/regression_test.py
+tests/integration/testdata/dummy-plugin/config.json
+tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
+tests/unit/__init__.py
+tests/unit/api_build_test.py
+tests/unit/api_container_test.py
+tests/unit/api_exec_test.py
+tests/unit/api_image_test.py
+tests/unit/api_network_test.py
+tests/unit/api_test.py
+tests/unit/api_volume_test.py
+tests/unit/auth_test.py
+tests/unit/client_test.py
+tests/unit/dockertypes_test.py
+tests/unit/errors_test.py
+tests/unit/fake_api.py
+tests/unit/fake_api_client.py
+tests/unit/fake_stat.py
+tests/unit/models_containers_test.py
+tests/unit/models_images_test.py
+tests/unit/models_networks_test.py
+tests/unit/models_resources_test.py
+tests/unit/models_services_test.py
+tests/unit/ssladapter_test.py
+tests/unit/swarm_test.py
+tests/unit/utils_config_test.py
+tests/unit/utils_json_stream_test.py
+tests/unit/utils_test.py
+tests/unit/testdata/certs/ca.pem
+tests/unit/testdata/certs/cert.pem
+tests/unit/testdata/certs/key.pem \ No newline at end of file
diff --git a/docker_py.egg-info/dependency_links.txt b/docker.egg-info/dependency_links.txt
index 8b13789..8b13789 100644
--- a/docker_py.egg-info/dependency_links.txt
+++ b/docker.egg-info/dependency_links.txt
diff --git a/docker_py.egg-info/not-zip-safe b/docker.egg-info/not-zip-safe
index 8b13789..8b13789 100644
--- a/docker_py.egg-info/not-zip-safe
+++ b/docker.egg-info/not-zip-safe
diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt
new file mode 100644
index 0000000..623f4c7
--- /dev/null
+++ b/docker.egg-info/requires.txt
@@ -0,0 +1,21 @@
+requests!=2.18.0,>=2.14.2
+six>=1.4.0
+websocket-client>=0.32.0
+docker-pycreds>=0.2.2
+
+[:python_version < "3.3"]
+ipaddress>=1.0.16
+
+[:python_version < "3.5"]
+backports.ssl_match_hostname>=3.5
+
+[:sys_platform == "win32" and python_version < "3.6"]
+pypiwin32==219
+
+[:sys_platform == "win32" and python_version >= "3.6"]
+pypiwin32==220
+
+[tls]
+pyOpenSSL>=0.14
+cryptography>=1.3.4
+idna>=2.0.0
diff --git a/docker_py.egg-info/top_level.txt b/docker.egg-info/top_level.txt
index bdb9670..bdb9670 100644
--- a/docker_py.egg-info/top_level.txt
+++ b/docker.egg-info/top_level.txt
diff --git a/docker/__init__.py b/docker/__init__.py
index ad53805..cf732e1 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,6 +1,7 @@
+# flake8: noqa
+from .api import APIClient
+from .client import DockerClient, from_env
from .version import version, version_info
__version__ = version
-__title__ = 'docker-py'
-
-from .client import Client, AutoVersionClient, from_env # flake8: noqa
+__title__ = 'docker'
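As the `__init__.py` diff shows, the old `docker.Client` and `AutoVersionClient` entry points are gone; the package now exports the low-level `APIClient` alongside the high-level `DockerClient` and `from_env`. A quick sketch of the two surfaces (the socket path assumes a default Linux install):

```python
import docker

# High-level, object-oriented API
client = docker.from_env()
print(client.containers.list())

# Low-level API: one method per Engine endpoint, replacing docker.Client
api = docker.APIClient(base_url='unix://var/run/docker.sock')
print(api.version()['ApiVersion'])
```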
diff --git a/docker/api/__init__.py b/docker/api/__init__.py
index bc7e93c..ff51844 100644
--- a/docker/api/__init__.py
+++ b/docker/api/__init__.py
@@ -1,10 +1,2 @@
# flake8: noqa
-from .build import BuildApiMixin
-from .container import ContainerApiMixin
-from .daemon import DaemonApiMixin
-from .exec_api import ExecApiMixin
-from .image import ImageApiMixin
-from .network import NetworkApiMixin
-from .service import ServiceApiMixin
-from .swarm import SwarmApiMixin
-from .volume import VolumeApiMixin
+from .client import APIClient
diff --git a/docker/api/build.py b/docker/api/build.py
index 7403716..d69985e 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -1,11 +1,11 @@
+import json
import logging
import os
-import re
-import json
+import random
+from .. import auth
from .. import constants
from .. import errors
-from .. import auth
from .. import utils
@@ -14,10 +14,105 @@ log = logging.getLogger(__name__)
class BuildApiMixin(object):
def build(self, path=None, tag=None, quiet=False, fileobj=None,
- nocache=False, rm=False, stream=False, timeout=None,
+ nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
- decode=False, buildargs=None, gzip=False):
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None, cache_from=None, target=None, network_mode=None,
+ squash=None, extra_hosts=None, platform=None, isolation=None):
+ """
+ Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
+ needs to be set. ``path`` can be a local path (to a directory
+ containing a Dockerfile) or a remote URL. ``fileobj`` must be a
+ readable file-like object to a Dockerfile.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is compressed
+ also, set ``encoding`` to the correct value (e.g ``gzip``).
+
+ Example:
+ >>> from io import BytesIO
+ >>> from docker import APIClient
+ >>> dockerfile = '''
+ ... # Shared Volume
+ ... FROM busybox:buildroot-2014.02
+ ... VOLUME /data
+ ... CMD ["/bin/sh"]
+ ... '''
+ >>> f = BytesIO(dockerfile.encode('utf-8'))
+ >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
+ >>> response = [line for line in cli.build(
+ ... fileobj=f, rm=True, tag='yourname/volume'
+ ... )]
+ >>> response
+ ['{"stream":" ---\\u003e a9eb17255234\\n"}',
+ '{"stream":"Step 1 : VOLUME /data\\n"}',
+ '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
+ '{"stream":" ---\\u003e 713bca62012e\\n"}',
+ '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
+ '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
+ '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
+ '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
+ '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
+ '{"stream":"Successfully built 032b8b2855fc\\n"}']
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (:py:class:`list`): A list of images used for build
+ cache resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+
+ Returns:
+ A generator for the build output.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
+ """
remote = context = None
headers = {}
container_limits = container_limits or {}
@@ -50,23 +145,16 @@ class BuildApiMixin(object):
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore, 'r') as f:
- exclude = list(filter(bool, f.read().splitlines()))
+ exclude = list(filter(
+ lambda x: x != '' and x[0] != '#',
+ [l.strip() for l in f.read().splitlines()]
+ ))
+ dockerfile = process_dockerfile(dockerfile, path)
context = utils.tar(
path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
)
encoding = 'gzip' if gzip else encoding
- if utils.compare_version('1.8', self._version) >= 0:
- stream = True
-
- if dockerfile and utils.compare_version('1.17', self._version) < 0:
- raise errors.InvalidVersion(
- 'dockerfile was only introduced in API version 1.17'
- )
-
- if utils.compare_version('1.19', self._version) < 0:
- pull = 1 if pull else 0
-
u = self._url('/build')
params = {
't': tag,
@@ -81,42 +169,100 @@ class BuildApiMixin(object):
params.update(container_limits)
if buildargs:
- if utils.version_gte(self._version, '1.21'):
- params.update({'buildargs': json.dumps(buildargs)})
+ params.update({'buildargs': json.dumps(buildargs)})
+
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
+ if cache_from:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'cachefrom': json.dumps(cache_from)})
+ else:
+ raise errors.InvalidVersion(
+ 'cache_from was only introduced in API version 1.25'
+ )
+
+ if target:
+ if utils.version_gte(self._version, '1.29'):
+ params.update({'target': target})
+ else:
+ raise errors.InvalidVersion(
+ 'target was only introduced in API version 1.29'
+ )
+
+ if network_mode:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'networkmode': network_mode})
+ else:
+ raise errors.InvalidVersion(
+ 'network_mode was only introduced in API version 1.25'
+ )
+
+ if squash:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'squash': squash})
else:
raise errors.InvalidVersion(
- 'buildargs was only introduced in API version 1.21'
+ 'squash was only introduced in API version 1.25'
+ )
+
+ if extra_hosts is not None:
+ if utils.version_lt(self._version, '1.27'):
+ raise errors.InvalidVersion(
+ 'extra_hosts was only introduced in API version 1.27'
+ )
+
+ if isinstance(extra_hosts, dict):
+ extra_hosts = utils.format_extra_hosts(extra_hosts)
+ params.update({'extrahosts': extra_hosts})
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
)
+ params['platform'] = platform
+
+ if isolation is not None:
+ if utils.version_lt(self._version, '1.24'):
+ raise errors.InvalidVersion(
+ 'isolation was only introduced in API version 1.24'
+ )
+ params['isolation'] = isolation
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
headers['Content-Encoding'] = encoding
- if utils.compare_version('1.9', self._version) >= 0:
- self._set_auth_headers(headers)
+ self._set_auth_headers(headers)
response = self._post(
u,
data=context,
params=params,
headers=headers,
- stream=stream,
+ stream=True,
timeout=timeout,
)
if context is not None and not custom_context:
context.close()
- if stream:
- return self._stream_helper(response, decode=decode)
- else:
- output = self._result(response)
- srch = r'Successfully built ([0-9a-f]+)'
- match = re.search(srch, output)
- if not match:
- return None, output
- return match.group(1), output
+ return self._stream_helper(response, decode=decode)
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
@@ -130,19 +276,50 @@ class BuildApiMixin(object):
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
+ auth_data = {}
+ if self._auth_configs.get('credsStore'):
+ # Using a credentials store, we need to retrieve the
+ # credentials for each registry listed in the config.json file
+ # Matches CLI behavior: https://github.com/docker/docker/blob/
+ # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
+ # credentials/native_store.go#L68-L83
+ for registry in self._auth_configs.get('auths', {}).keys():
+ auth_data[registry] = auth.resolve_authconfig(
+ self._auth_configs, registry
+ )
+ else:
+ auth_data = self._auth_configs.get('auths', {}).copy()
+ # See https://github.com/docker/docker-py/issues/1683
+ if auth.INDEX_NAME in auth_data:
+ auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
+
log.debug(
'Sending auth config ({0})'.format(
- ', '.join(repr(k) for k in self._auth_configs.keys())
+ ', '.join(repr(k) for k in auth_data.keys())
)
)
- if utils.compare_version('1.19', self._version) >= 0:
- headers['X-Registry-Config'] = auth.encode_header(
- self._auth_configs
- )
- else:
- headers['X-Registry-Config'] = auth.encode_header({
- 'configs': self._auth_configs
- })
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
else:
log.debug('No auth config found')
+
+
+def process_dockerfile(dockerfile, path):
+ if not dockerfile:
+ return (None, None)
+
+ abs_dockerfile = dockerfile
+ if not os.path.isabs(dockerfile):
+ abs_dockerfile = os.path.join(path, dockerfile)
+
+ if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+ os.path.relpath(abs_dockerfile, path).startswith('..')):
+ with open(abs_dockerfile, 'r') as df:
+ return (
+ '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ df.read()
+ )
+ else:
+ return (dockerfile, None)
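The expanded `build()` above gates each new keyword on a minimum API version. A sketch exercising a few of them together; the tag, label values and stage name are illustrative, and `decode=True` yields dicts as the docstring describes:

```python
import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto')

# labels needs API >= 1.23, cache_from >= 1.25, target >= 1.29; older
# daemons raise docker.errors.InvalidVersion, as in the checks above.
for chunk in api.build(
    path='.',                           # context directory with a Dockerfile
    tag='example/app:latest',
    labels={'maintainer': 'you@example.com'},
    cache_from=['example/app:latest'],
    target='runtime',                   # hypothetical multi-stage build stage
    decode=True,
):
    if 'stream' in chunk:
        print(chunk['stream'], end='')
```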
diff --git a/docker/api/client.py b/docker/api/client.py
new file mode 100644
index 0000000..13c292a
--- /dev/null
+++ b/docker/api/client.py
@@ -0,0 +1,456 @@
+import json
+import struct
+from functools import partial
+
+import requests
+import requests.exceptions
+import six
+import websocket
+
+from .build import BuildApiMixin
+from .config import ConfigApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .network import NetworkApiMixin
+from .plugin import PluginApiMixin
+from .secret import SecretApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
+from .. import auth
+from ..constants import (
+ DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
+ MINIMUM_DOCKER_API_VERSION
+)
+from ..errors import (
+ DockerException, InvalidVersion, TLSParameterError,
+ create_api_error_from_http_exception
+)
+from ..tls import TLSConfig
+from ..transport import SSLAdapter, UnixAdapter
+from ..utils import utils, check_resource, update_headers, config
+from ..utils.socket import frames_iter, socket_raw_iter
+from ..utils.json_stream import json_stream
+try:
+ from ..transport import NpipeAdapter
+except ImportError:
+ pass
+
+
+class APIClient(
+ requests.Session,
+ BuildApiMixin,
+ ConfigApiMixin,
+ ContainerApiMixin,
+ DaemonApiMixin,
+ ExecApiMixin,
+ ImageApiMixin,
+ NetworkApiMixin,
+ PluginApiMixin,
+ SecretApiMixin,
+ ServiceApiMixin,
+ SwarmApiMixin,
+ VolumeApiMixin):
+ """
+ A low-level client for the Docker Engine API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.33',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
+ u'GitCommit': u'f4ffd2511c',
+ u'GoVersion': u'go1.9.2',
+ u'KernelVersion': u'4.14.3-1-ARCH',
+ u'MinAPIVersion': u'1.12',
+ u'Os': u'linux',
+ u'Version': u'17.10.0-ce'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.30``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ """
+
+ __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
+ '_general_configs',
+ '_version',
+ 'base_url',
+ 'timeout']
+
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
+ super(APIClient, self).__init__()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._general_configs = config.load_general_config()
+ self._auth_configs = auth.load_config(
+ config_dict=self._general_configs
+ )
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ # host part of URL should be unused, but is resolved by requests
+ # module in proxy_bypass_macosx_sysconf()
+ self.base_url = 'http+docker://localhost'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ try:
+ self._custom_adapter = NpipeAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ except NameError:
+ raise DockerException(
+ 'Install pypiwin32 package to enable npipe:// support'
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None:
+ self._version = DEFAULT_DOCKER_API_VERSION
+ elif isinstance(version, six.string_types):
+ if version.lower() == 'auto':
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
+ else:
+ raise DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ raise InvalidVersion(
+ 'API versions below {} are no longer supported by this '
+ 'library.'.format(MINIMUM_DOCKER_API_VERSION)
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError:
+ raise DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ )
+ except Exception as e:
+ raise DockerException(
+ 'Error while fetching server API version: {0}'.format(e)
+ )
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, six.string_types):
+ raise ValueError(
+ 'Expected a string but found {0} ({1}) '
+ 'instead'.format(arg, type(arg))
+ )
+
+ quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+ args = map(quote_f, args)
+
+ if kwargs.get('versioned_api', True):
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(*args)
+ )
+ else:
+ return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise create_api_error_from_http_exception(e)
+
+ def _result(self, response, json=False, binary=False):
+ assert not (json and binary)
+ self._raise_for_status(response)
+
+ if json:
+ return response.json()
+ if binary:
+ return response.content
+ return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None and isinstance(data, dict):
+ for k, v in six.iteritems(data):
+ if v is not None:
+ data2[k] = v
+ elif data is not None:
+ data2 = data
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ @check_resource('container')
+ def _attach_websocket(self, container, params=None):
+ url = self._url("/containers/{0}/attach/ws", container)
+ req = requests.Request("POST", url, params=self._attach_params(params))
+ full_url = req.prepare().url
+ full_url = full_url.replace("http://", "ws://", 1)
+ full_url = full_url.replace("https://", "wss://", 1)
+ return self._create_websocket_connection(full_url)
+
+ def _create_websocket_connection(self, url):
+ return websocket.create_connection(url)
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif six.PY3:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ else:
+ sock = response.raw._fp.fp._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+
+ if response.raw._fp.chunked:
+ if decode:
+ for chunk in json_stream(self._stream_helper(response, False)):
+ yield chunk
+ else:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ buf_length = len(buf)
+ walker = 0
+ while True:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
+ break
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ _, length = struct.unpack_from('>BxxxL', header)
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ _, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result(self, response, chunk_size=1, decode=True):
+ ''' Stream result for TTY-enabled container and raw binary data'''
+ self._raise_for_status(response)
+ for out in response.iter_content(chunk_size, decode):
+ yield out
+
+ def _read_from_socket(self, response, stream, tty=False):
+ socket = self._get_raw_response_socket(response)
+
+ gen = None
+ if tty is False:
+ gen = frames_iter(socket)
+ else:
+ gen = socket_raw_iter(socket)
+
+ if stream:
+ return gen
+ else:
+ return six.binary_type().join(gen)
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ @check_resource('container')
+ def _check_is_tty(self, container):
+ cont = self.inspect_container(container)
+ return cont['Config']['Tty']
+
+ def _get_result(self, container, stream, res):
+ return self._get_result_tty(stream, res, self._check_is_tty(container))
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = six.binary_type()
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super(APIClient, self).get_adapter(url)
+ except requests.exceptions.InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
+
+ def reload_config(self, dockercfg_path=None):
+ """
+ Force a reload of the auth configuration
+
+ Args:
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise``$HOME/.dockercfg``)
+
+ Returns:
+ None
+ """
+ self._auth_configs = auth.load_config(dockercfg_path)
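Two details of `APIClient` above are worth illustrating. First, passing `version='auto'` makes the constructor call `_retrieve_server_version()`, which reads `ApiVersion` from the unversioned `/version` endpoint:

```python
import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto')
print(api.api_version)  # e.g. '1.37' -- whatever the daemon reports
```

Second, the `_multiplexed_*` helpers parse the Engine's attach/logs framing: an 8-byte header (one byte of stream type, three padding bytes, a big-endian uint32 payload length) followed by the payload, which is exactly what `struct.unpack('>BxxxL', ...)` decodes. A self-contained illustration of that layout:

```python
import struct

frame = b'\x01\x00\x00\x00\x00\x00\x00\x06hello\n'  # stdout frame, 6-byte payload
stream_type, length = struct.unpack_from('>BxxxL', frame[:8])
assert (stream_type, length) == (1, 6)               # 1 == stdout, 2 == stderr
print(frame[8:8 + length])                           # b'hello\n'
```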
diff --git a/docker/api/config.py b/docker/api/config.py
new file mode 100644
index 0000000..b46b09c
--- /dev/null
+++ b/docker/api/config.py
@@ -0,0 +1,91 @@
+import base64
+
+import six
+
+from .. import utils
+
+
+class ConfigApiMixin(object):
+ @utils.minimum_version('1.25')
+ def create_config(self, name, data, labels=None):
+ """
+ Create a config
+
+ Args:
+ name (string): Name of the config
+ data (bytes): Config data to be stored
+ labels (dict): A mapping of labels to assign to the config
+
+ Returns (dict): ID of the newly created config
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ url = self._url('/configs/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def inspect_config(self, id):
+ """
+ Retrieve config metadata
+
+ Args:
+ id (string): Full ID of the config to remove
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def remove_config(self, id):
+ """
+ Remove a config
+
+ Args:
+ id (string): Full ID of the config to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def configs(self, filters=None):
+ """
+ List configs
+
+ Args:
+ filters (dict): A map of filters to process on the configs
+ list. Available filters: ``names``
+
+ Returns (list): A list of configs
+ """
+ url = self._url('/configs')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
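The new `ConfigApiMixin` maps one method per `/configs` endpoint. A usage sketch: configs are a Swarm-mode feature (API >= 1.25), so the daemon must be a swarm manager, and the name, payload and label here are illustrative:

```python
import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto')

created = api.create_config('app-settings', b'key=value\n',
                            labels={'env': 'demo'})
config_id = created['ID']

print(api.inspect_config(config_id)['Spec']['Name'])     # 'app-settings'
print(api.configs(filters={'names': ['app-settings']}))  # matching configs
api.remove_config(config_id)                             # True on success
```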
diff --git a/docker/api/container.py b/docker/api/container.py
index b8507d8..cb97b79 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,16 +1,43 @@
import six
-import warnings
from datetime import datetime
from .. import errors
from .. import utils
-from ..utils.utils import create_networking_config, create_endpoint_config
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..types import (
+ CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
+ NetworkingConfig
+)
class ContainerApiMixin(object):
- @utils.check_resource
+ @utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
+ """
+ Attach to a container.
+
+ The ``.logs()`` function is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ container (str): The container to attach to.
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
@@ -24,12 +51,34 @@ class ContainerApiMixin(object):
}
u = self._url("/containers/{0}/attach", container)
- response = self._post(u, headers=headers, params=params, stream=stream)
+ response = self._post(u, headers=headers, params=params, stream=True)
- return self._read_from_socket(response, stream)
+ output = self._read_from_socket(
+ response, stream, self._check_is_tty(container)
+ )
- @utils.check_resource
+ if stream:
+ return CancellableStream(output, response)
+ else:
+ return output
+
+ @utils.check_resource('container')
def attach_socket(self, container, params=None, ws=False):
+ """
+ Like ``attach``, but returns the underlying socket-like object for the
+ HTTP request.
+
+ Args:
+ container (str): The container to attach to.
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ For ``detachKeys``, ~/.docker/config.json is used by default.
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if params is None:
params = {
'stdout': 1,
@@ -37,6 +86,11 @@ class ContainerApiMixin(object):
'stream': 1
}
+ if 'detachKeys' not in params \
+ and 'detachKeys' in self._general_configs:
+
+ params['detachKeys'] = self._general_configs['detachKeys']
+
if ws:
return self._attach_websocket(container, params)
@@ -53,9 +107,29 @@ class ContainerApiMixin(object):
)
)
- @utils.check_resource
+ @utils.check_resource('container')
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ container (str): The image hash of the container
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'container': container,
'repo': repository,
@@ -71,6 +145,51 @@ class ContainerApiMixin(object):
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ quiet (bool): Only display numeric Ids
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ trunc (bool): Truncate output
+ latest (bool): Show only the latest created container, include
+ non-running ones.
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+ before (str): Show only container created before Id or Name,
+ include non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ size (bool): Display sizes
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ A list of dicts, one per container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
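+
+ Example:
+
+ A minimal sketch (assumes ``cli`` is a connected ``APIClient``):
+
+ >>> cli.containers(all=True, filters={'status': 'exited'})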
+ """
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
@@ -91,51 +210,207 @@ class ContainerApiMixin(object):
x['Id'] = x['Id'][:12]
return res
- @utils.check_resource
- def copy(self, container, resource):
- if utils.version_gte(self._version, '1.20'):
- warnings.warn(
- 'Client.copy() is deprecated for API version >= 1.20, '
- 'please use get_archive() instead',
- DeprecationWarning
- )
- res = self._post_json(
- self._url("/containers/{0}/copy".format(container)),
- data={"Resource": resource},
- stream=True
- )
- self._raise_for_status(res)
- return res.raw
-
def create_container(self, image, command=None, hostname=None, user=None,
- detach=False, stdin_open=False, tty=False,
- mem_limit=None, ports=None, environment=None,
- dns=None, volumes=None, volumes_from=None,
+ detach=False, stdin_open=False, tty=False, ports=None,
+ environment=None, volumes=None,
network_disabled=False, name=None, entrypoint=None,
- cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=None, cpuset=None, host_config=None,
- mac_address=None, labels=None, volume_driver=None,
- stop_signal=None, networking_config=None):
+ working_dir=None, domainname=None, host_config=None,
+ mac_address=None, labels=None, stop_signal=None,
+ networking_config=None, healthcheck=None,
+ stop_timeout=None, runtime=None):
+ """
+ Creates a container. Parameters are similar to those for the ``docker
+ run`` command except it doesn't support the attach options (``-a``).
- if isinstance(volumes, six.string_types):
- volumes = [volumes, ]
+ The arguments that are passed directly to this function are
+ host-independent configuration options. Host-specific configuration
+ is passed with the `host_config` argument. You'll normally want to
+ use this method in combination with the :py:meth:`create_host_config`
+ method to generate ``host_config``.
+
+ **Port bindings**
+
+ Port binding is done in two parts: first, provide a list of ports to
+ open inside the container with the ``ports`` parameter, then declare
+ bindings with the ``host_config`` parameter. For example:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[1111, 2222],
+ host_config=cli.create_host_config(port_bindings={
+ 1111: 4567,
+ 2222: None
+ })
+ )
+
+
+ You can limit the host address on which the port will be exposed,
+ like this:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+
+ Or without host port assignment:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+
+ If you wish to use UDP instead of TCP (default), you need to declare
+ ports as such in both the config and host config:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
+ host_config=cli.create_host_config(port_bindings={
+ '1111/udp': 4567, 2222: None
+ })
+ )
+
+ To bind multiple host ports to a single container port, use the
+ following syntax:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [1234, 4567]
+ })
+
+ You can also bind multiple IPs to a single container port:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [
+ ('192.168.0.100', 1234),
+ ('192.168.0.101', 1234)
+ ]
+ })
+
+ **Using volumes**
- if host_config and utils.compare_version('1.15', self._version) < 0:
- raise errors.InvalidVersion(
- 'host_config is not supported in API < 1.15'
+ Volume declaration is done in two parts. Provide a list of
+ paths to use as mountpoints inside the container with the
+ ``volumes`` parameter, and declare mappings from paths on the host
+ in the ``host_config`` section.
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds={
+ '/home/user1/': {
+ 'bind': '/mnt/vol2',
+ 'mode': 'rw',
+ },
+ '/var/www': {
+ 'bind': '/mnt/vol1',
+ 'mode': 'ro',
+ }
+ })
)
+ You can alternatively specify binds as a list. This code is equivalent
+ to the example above:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ ])
+ )
+
+ **Networking**
+
+ You can specify networks to connect the container to by using the
+ ``networking_config`` parameter. At the time of creation, you can
+ only connect a container to a single network, but you
+ can create more connections by using
+ :py:meth:`~connect_container_to_network`.
+
+ For example:
+
+ .. code-block:: python
+
+ networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config(
+ ipv4_address='172.28.0.124',
+ aliases=['foo', 'bar'],
+ links=['container2']
+ )
+ })
+
+ ctnr = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ Args:
+ image (str): The image to run
+ command (str or list): The command to be run in the container
+ hostname (str): Optional hostname for the container
+ user (str or int): Username or UID
+ detach (bool): Detached mode: run container in the background and
+ return container ID
+ stdin_open (bool): Keep STDIN open even if not attached
+ tty (bool): Allocate a pseudo-TTY
+ ports (list of ints): A list of port numbers
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ volumes (str or list): List of paths inside the container to use
+ as volumes.
+ network_disabled (bool): Disable networking
+ name (str): A name for the container
+ entrypoint (str or list): An entrypoint
+ working_dir (str): Path to the working directory
+ domainname (str): The domain name to use for the container
+ host_config (dict): A dictionary created with
+ :py:meth:`create_host_config`.
+ mac_address (str): The MAC address to assign to the container
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ stop_timeout (int): Timeout to stop the container, in seconds.
+ Default: 10
+ networking_config (dict): A networking configuration generated
+ by :py:meth:`create_networking_config`.
+ runtime (str): Runtime to use with this container.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+
+ Returns:
+ A dictionary with the created container's ``Id`` key and a
+ ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
config = self.create_container_config(
- image, command, hostname, user, detach, stdin_open,
- tty, mem_limit, ports, environment, dns, volumes, volumes_from,
- network_disabled, entrypoint, cpu_shares, working_dir, domainname,
- memswap_limit, cpuset, host_config, mac_address, labels,
- volume_driver, stop_signal, networking_config,
+ image, command, hostname, user, detach, stdin_open, tty,
+ ports, environment, volumes,
+ network_disabled, entrypoint, working_dir, domainname,
+ host_config, mac_address, labels,
+ stop_signal, networking_config, healthcheck,
+ stop_timeout, runtime
)
return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs):
- return utils.create_container_config(self._version, *args, **kwargs)
+ return ContainerConfig(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
u = self._url("/containers/create")
@@ -146,6 +421,148 @@ class ContainerApiMixin(object):
return self._result(res, True)
def create_host_config(self, *args, **kwargs):
+ """
+ Create a dictionary for the ``host_config`` argument to
+ :py:meth:`create_container`.
+
+ Args:
+ auto_remove (bool): enable auto-removal of the container on daemon
+ side when the container's process exits.
+ binds (dict): Volumes to bind. See :py:meth:`create_container`
+ for more information.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file
+ dns_search (:py:class:`list`): DNS search domains.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ lxc_conf (dict): LXC config.
+ mem_limit (float or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as the
+ intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``binds``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container
+ on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ port_bindings (dict): See :py:meth:`create_container`
+ for more information.
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure``, or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of dicts.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ runtime (str): Runtime to use with this container.
+
+
+ Returns:
+ (dict) A dictionary which can be passed to the ``host_config``
+ argument to :py:meth:`create_container`.
+
+ Example:
+
+ >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ volumes_from=['nostalgic_newton'])
+ {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
+ 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
+
+"""
if not kwargs:
kwargs = {}
if 'version' in kwargs:
@@ -154,31 +571,130 @@ class ContainerApiMixin(object):
"keyword argument 'version'"
)
kwargs['version'] = self._version
- return utils.create_host_config(*args, **kwargs)
+ return HostConfig(*args, **kwargs)
def create_networking_config(self, *args, **kwargs):
- return create_networking_config(*args, **kwargs)
+ """
+ Create a networking config dictionary to be used as the
+ ``networking_config`` parameter in :py:meth:`create_container`.
+
+ Args:
+ endpoints_config (dict): A dictionary mapping network names to
+ endpoint configurations generated by
+ :py:meth:`create_endpoint_config`.
+
+ Returns:
+ (dict) A networking config.
+
+ Example:
+
+ >>> docker_client.create_network('network1')
+ >>> networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config()
+ })
+ >>> container = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ """
+ return NetworkingConfig(*args, **kwargs)
def create_endpoint_config(self, *args, **kwargs):
- return create_endpoint_config(self._version, *args, **kwargs)
+ """
+ Create an endpoint config dictionary to be used with
+ :py:meth:`create_networking_config`.
+
+ Args:
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Returns:
+ (dict) An endpoint config.
+
+ Example:
- @utils.check_resource
+ >>> endpoint_config = client.create_endpoint_config(
+ aliases=['web', 'app'],
+ links=['app_db'],
+ ipv4_address='132.65.0.123'
+ )
+
+ """
+ return EndpointConfig(self._version, *args, **kwargs)
+
+ @utils.check_resource('container')
def diff(self, container):
+ """
+ Inspect changes on a container's filesystem.
+
+ Args:
+ container (str): The container to diff
+
+ Returns:
+ (list): A list of dicts, one per changed file or directory, each
+ containing ``Path`` and ``Kind`` keys
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
- @utils.check_resource
- def export(self, container):
+ @utils.check_resource('container')
+ def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Export the contents of a filesystem as a tar archive.
+
+ Args:
+ container (str): The container to export
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): The archived filesystem data stream
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
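+
+ Example:
+
+ A minimal sketch (the output path is a placeholder):
+
+ >>> with open('/tmp/rootfs.tar', 'wb') as f:
+ ... for chunk in cli.export(container):
+ ... f.write(chunk)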
+ """
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
- self._raise_for_status(res)
- return res.raw
+ return self._stream_raw_result(res, chunk_size, False)
+
+ @utils.check_resource('container')
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Retrieve a file or folder from a container in the form of a tar
+ archive.
+
+ Args:
+ container (str): The container where the file is located
+ path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
- @utils.check_resource
- @utils.minimum_version('1.20')
- def get_archive(self, container, path):
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
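+
+ Example:
+
+ A minimal sketch (retrieves ``/bin/sh``; the local path is a
+ placeholder):
+
+ >>> bits, stat = cli.get_archive(container, '/bin/sh')
+ >>> with open('/tmp/sh_bin.tar', 'wb') as f:
+ ... for chunk in bits:
+ ... f.write(chunk)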
+ """
params = {
'path': path
}
@@ -187,18 +703,43 @@ class ContainerApiMixin(object):
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
- res.raw,
+ self._stream_raw_result(res, chunk_size, False),
utils.decode_json_header(encoded_stat) if encoded_stat else None
)
- @utils.check_resource
+ @utils.check_resource('container')
def inspect_container(self, container):
+ """
+ Identical to the `docker inspect` command, but only for containers.
+
+ Args:
+ container (str): The container to inspect
+
+ Returns:
+ (dict): Similar to the output of `docker inspect`, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
- @utils.check_resource
+ @utils.check_resource('container')
def kill(self, container, signal=None):
+ """
+ Kill a container or send a signal to a container.
+
+ Args:
+ container (str): The container to kill
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
@@ -209,51 +750,128 @@ class ContainerApiMixin(object):
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def logs(self, container, stdout=True, stderr=True, stream=False,
- timestamps=False, tail='all', since=None, follow=None):
- if utils.compare_version('1.11', self._version) >= 0:
- if follow is None:
- follow = stream
- params = {'stderr': stderr and 1 or 0,
- 'stdout': stdout and 1 or 0,
- 'timestamps': timestamps and 1 or 0,
- 'follow': follow and 1 or 0,
- }
- if utils.compare_version('1.13', self._version) >= 0:
- if tail != 'all' and (not isinstance(tail, int) or tail < 0):
- tail = 'all'
- params['tail'] = tail
-
- if since is not None:
- if utils.compare_version('1.19', self._version) < 0:
- raise errors.InvalidVersion(
- 'since is not supported in API < 1.19'
- )
- else:
- if isinstance(since, datetime):
- params['since'] = utils.datetime_to_timestamp(since)
- elif (isinstance(since, int) and since > 0):
- params['since'] = since
- url = self._url("/containers/{0}/logs", container)
- res = self._get(url, params=params, stream=stream)
- return self._get_result(container, stream, res)
- return self.attach(
- container,
- stdout=stdout,
- stderr=stderr,
- stream=stream,
- logs=True
- )
+ timestamps=False, tail='all', since=None, follow=None,
+ until=None):
+ """
+ Get logs from a container. Similar to the ``docker logs`` command.
- @utils.check_resource
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ container (str): The container to get logs from
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output the specified number of lines at the end
+ of logs. Either an integer number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+ until (datetime or int): Show logs that occurred before the given
+ datetime or integer epoch (in seconds)
+
+ Returns:
+ (generator or str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
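+
+ Example:
+
+ A minimal sketch (assumes ``container`` is a running container):
+
+ >>> for line in cli.logs(container, stream=True, follow=True):
+ ... print(line)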
+ """
+ if follow is None:
+ follow = stream
+ params = {'stderr': stderr and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'timestamps': timestamps and 1 or 0,
+ 'follow': follow and 1 or 0,
+ }
+ if tail != 'all' and (not isinstance(tail, int) or tail < 0):
+ tail = 'all'
+ params['tail'] = tail
+
+ if since is not None:
+ if isinstance(since, datetime):
+ params['since'] = utils.datetime_to_timestamp(since)
+ elif (isinstance(since, int) and since > 0):
+ params['since'] = since
+ else:
+ raise errors.InvalidArgument(
+ 'since value should be datetime or positive int, '
+ 'not {}'.format(type(since))
+ )
+
+ if until is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'until is not supported for API version < 1.35'
+ )
+ if isinstance(until, datetime):
+ params['until'] = utils.datetime_to_timestamp(until)
+ elif (isinstance(until, int) and until > 0):
+ params['until'] = until
+ else:
+ raise errors.InvalidArgument(
+ 'until value should be datetime or positive int, '
+ 'not {}'.format(type(until))
+ )
+
+ url = self._url("/containers/{0}/logs", container)
+ res = self._get(url, params=params, stream=stream)
+ output = self._get_result(container, stream, res)
+
+ if stream:
+ return CancellableStream(output, res)
+ else:
+ return output
+
+ @utils.check_resource('container')
def pause(self, container):
+ """
+ Pauses all processes within a container.
+
+ Args:
+ container (str): The container to pause
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def port(self, container, private_port):
+ """
+ Look up the public-facing port that is NAT-ed to ``private_port``.
+ Identical to the ``docker port`` command.
+
+ Args:
+ container (str): The container to look up
+ private_port (int): The private port to inspect
+
+ Returns:
+ (list of dict): The mapping for the host ports
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ .. code-block:: bash
+
+ $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
+ 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
+
+ .. code-block:: python
+
+ >>> cli.port('7174d6347063', 80)
+ [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
+ """
res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
@@ -275,116 +893,190 @@ class ContainerApiMixin(object):
return h_ports
- @utils.check_resource
- @utils.minimum_version('1.20')
+ @utils.check_resource('container')
def put_archive(self, container, path, data):
+ """
+ Insert a file or folder in an existing container using a tar archive as
+ source.
+
+ Args:
+ container (str): The container where the file(s) will be extracted
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
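+
+ Example:
+
+ A minimal sketch (``config.ini`` and the target path are
+ placeholders; the archive is built in memory first):
+
+ >>> import io, tarfile
+ >>> buf = io.BytesIO()
+ >>> with tarfile.open(fileobj=buf, mode='w') as tar:
+ ... tar.add('config.ini')
+ >>> cli.put_archive(container, '/etc/app', buf.getvalue())
+ True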
+ """
params = {'path': path}
url = self._url('/containers/{0}/archive', container)
res = self._put(url, params=params, data=data)
self._raise_for_status(res)
return res.status_code == 200
- @utils.check_resource
+ @utils.minimum_version('1.25')
+ def prune_containers(self, filters=None):
+ """
+ Delete stopped containers
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted container IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/containers/prune')
+ return self._result(self._post(url, params=params), True)
+
+ @utils.check_resource('container')
def remove_container(self, container, v=False, link=False, force=False):
+ """
+ Remove a container. Similar to the ``docker rm`` command.
+
+ Args:
+ container (str): The container to remove
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res)
- @utils.minimum_version('1.17')
- @utils.check_resource
+ @utils.check_resource('container')
def rename(self, container, name):
+ """
+ Rename a container. Similar to the ``docker rename`` command.
+
+ Args:
+ container (str): ID of the container to rename
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def resize(self, container, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ container (str or dict): The container to resize
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def restart(self, container, timeout=10):
+ """
+ Restart a container. Similar to the ``docker restart`` command.
+
+ Args:
+ container (str or dict): The container to restart. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.check_resource
- def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=None, links=None, privileged=None,
- dns=None, dns_search=None, volumes_from=None, network_mode=None,
- restart_policy=None, cap_add=None, cap_drop=None, devices=None,
- extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None):
+ @utils.check_resource('container')
+ def start(self, container, *args, **kwargs):
+ """
+ Start a container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
- if utils.compare_version('1.10', self._version) < 0:
- if dns is not None:
- raise errors.InvalidVersion(
- 'dns is only supported for API version >= 1.10'
- )
- if volumes_from is not None:
- raise errors.InvalidVersion(
- 'volumes_from is only supported for API version >= 1.10'
- )
+ **Deprecation warning:** Passing configuration options in ``start`` is
+ no longer supported. Users are expected to provide host config options
+ in the ``host_config`` parameter of
+ :py:meth:`~ContainerApiMixin.create_container`.
- if utils.compare_version('1.15', self._version) < 0:
- if security_opt is not None:
- raise errors.InvalidVersion(
- 'security_opt is only supported for API version >= 1.15'
- )
- if ipc_mode:
- raise errors.InvalidVersion(
- 'ipc_mode is only supported for API version >= 1.15'
- )
- if utils.compare_version('1.17', self._version) < 0:
- if read_only is not None:
- raise errors.InvalidVersion(
- 'read_only is only supported for API version >= 1.17'
- )
- if pid_mode is not None:
- raise errors.InvalidVersion(
- 'pid_mode is only supported for API version >= 1.17'
- )
+ Args:
+ container (str): The container to start
- if utils.compare_version('1.18', self._version) < 0:
- if ulimits is not None:
- raise errors.InvalidVersion(
- 'ulimits is only supported for API version >= 1.18'
- )
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.DeprecatedMethod`
+ If any arguments besides ``container`` are provided.
- start_config_kwargs = dict(
- binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
- publish_all_ports=publish_all_ports, links=links, dns=dns,
- privileged=privileged, dns_search=dns_search, cap_add=cap_add,
- cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
- network_mode=network_mode, restart_policy=restart_policy,
- extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
- ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
- )
- start_config = None
-
- if any(v is not None for v in start_config_kwargs.values()):
- if utils.compare_version('1.15', self._version) > 0:
- warnings.warn(
- 'Passing host config parameters in start() is deprecated. '
- 'Please use host_config in create_container instead!',
- DeprecationWarning
- )
- start_config = self.create_host_config(**start_config_kwargs)
+ Example:
+ >>> container = cli.create_container(
+ ... image='busybox:latest',
+ ... command='/bin/sleep 30')
+ >>> cli.start(container=container.get('Id'))
+ """
+ if args or kwargs:
+ raise errors.DeprecatedMethod(
+ 'Providing configuration in the start() method is no longer '
+ 'supported. Use the host_config param in create_container '
+ 'instead.'
+ )
url = self._url("/containers/{0}/start", container)
- res = self._post_json(url, data=start_config)
+ res = self._post(url)
self._raise_for_status(res)
- @utils.minimum_version('1.17')
- @utils.check_resource
+ @utils.check_resource('container')
def stats(self, container, decode=None, stream=True):
+ """
+ Stream statistics for a specific container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ container (str): The container to stream statistics from
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
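+ Example:
+
+ A minimal sketch (reads a single stats snapshot rather than a
+ stream):
+
+ >>> cli.stats(container, stream=False)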
+ """
url = self._url("/containers/{0}/stats", container)
if stream:
return self._stream_helper(self._get(url, stream=True),
@@ -393,36 +1085,100 @@ class ContainerApiMixin(object):
return self._result(self._get(url, params={'stream': False}),
json=True)
- @utils.check_resource
- def stop(self, container, timeout=10):
- params = {'t': timeout}
+ @utils.check_resource('container')
+ def stop(self, container, timeout=None):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ container (str): The container to stop
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. If None, then the
+ StopTimeout value of the container will be used.
+ Default: None
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if timeout is None:
+ params = {}
+ timeout = 10
+ else:
+ params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
res = self._post(url, params=params,
timeout=(timeout + (self.timeout or 0)))
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def top(self, container, ps_args=None):
+ """
+ Display the running processes of a container.
+
+ Args:
+ container (str): The container to inspect
+ ps_args (str): Optional arguments to pass to ``ps`` (e.g. ``aux``)
+
+ Returns:
+ (dict): The output of ``top``, as a dict with ``Titles`` and
+ ``Processes`` keys
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
u = self._url("/containers/{0}/top", container)
params = {}
if ps_args is not None:
params['ps_args'] = ps_args
return self._result(self._get(u, params=params), True)
- @utils.check_resource
+ @utils.check_resource('container')
def unpause(self, container):
+ """
+ Unpause all processes within a container.
+
+ Args:
+ container (str): The container to unpause
+ """
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.22')
- @utils.check_resource
+ @utils.check_resource('container')
def update_container(
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
- mem_reservation=None, memswap_limit=None, kernel_memory=None
+ mem_reservation=None, memswap_limit=None, kernel_memory=None,
+ restart_policy=None
):
+ """
+ Update resource configs of a container.
+
+ Args:
+ container (str): The container to update
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
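+
+ Example:
+
+ A minimal sketch (the limit values are placeholders):
+
+ >>> cli.update_container(container, mem_limit='512m',
+ memswap_limit='1g')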
+ """
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
@@ -445,16 +1201,49 @@ class ContainerApiMixin(object):
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
+ if restart_policy:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'restart policy update is not supported '
+ 'for API version < 1.23'
+ )
+ data['RestartPolicy'] = restart_policy
res = self._post_json(url, data=data)
return self._result(res, True)
- @utils.check_resource
- def wait(self, container, timeout=None):
+ @utils.check_resource('container')
+ def wait(self, container, timeout=None, condition=None):
+ """
+ Block until a container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ container (str or dict): The container to wait on. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
+
+ Returns:
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
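+
+ Example:
+
+ A minimal sketch:
+
+ >>> result = cli.wait(container, timeout=30)
+ >>> result['StatusCode']
+ 0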
+ """
url = self._url("/containers/{0}/wait", container)
- res = self._post(url, timeout=timeout)
- self._raise_for_status(res)
- json_ = res.json()
- if 'StatusCode' in json_:
- return json_['StatusCode']
- return -1
+ params = {}
+ if condition is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'wait condition is not supported for API version < 1.30'
+ )
+ params['condition'] = condition
+
+ res = self._post(url, timeout=timeout, params=params)
+ return self._result(res, True)
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 9ebe73c..fc3692c 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -1,14 +1,64 @@
import os
-import warnings
from datetime import datetime
-from ..auth import auth
-from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from ..utils import utils
+from .. import auth, types, utils
class DaemonApiMixin(object):
+ @utils.minimum_version('1.25')
+ def df(self):
+ """
+ Get data usage information.
+
+ Returns:
+ (dict): A dictionary representing different resource categories
+ and their respective data usage.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/system/df')
+ return self._result(self._get(url), True)
+
def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ A :py:class:`docker.types.daemon.CancellableStream` generator
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for event in client.events():
+ ... print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+
+ or
+
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
@@ -23,22 +73,51 @@ class DaemonApiMixin(object):
'until': until,
'filters': filters
}
+ url = self._url('/events')
- return self._stream_helper(
- self.get(self._url('/events'), params=params, stream=True),
- decode=decode
- )
+ response = self._get(url, params=params, stream=True, timeout=None)
+ stream = self._stream_helper(response, decode=decode)
+
+ return types.CancellableStream(stream, response)
def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
- reauth=False, insecure_registry=False, dockercfg_path=None):
- if insecure_registry:
- warnings.warn(
- INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
- DeprecationWarning
- )
+ reauth=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+ reauth (bool): Whether or not to refresh existing authentication on
+ the Docker server.
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
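+
+ Example:
+
+ A minimal sketch (credentials and registry are placeholders):
+
+ >>> client.login(username='myuser', password='secret',
+ registry='https://index.docker.io/v1/')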
+ """
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
@@ -65,12 +144,36 @@ class DaemonApiMixin(object):
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- self._auth_configs[registry or auth.INDEX_NAME] = req_data
+ if 'auths' not in self._auth_configs:
+ self._auth_configs['auths'] = {}
+ self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
return self._result(response, json=True)
def ping(self):
- return self._result(self._get(self._url('/_ping')))
+ """
+ Checks that the server is responsive. An exception will be raised
+ if it isn't responding.
+
+ Returns:
+ (bool): ``True`` if the server is responsive.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
+ """
+ Returns version information from the server. Similar to the ``docker
+ version`` command.
+
+ Returns:
+ (dict): The server version information
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 6e49996..986d87f 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -5,21 +5,52 @@ from .. import utils
class ExecApiMixin(object):
- @utils.minimum_version('1.15')
- @utils.check_resource
+ @utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
- stdin=False, tty=False, privileged=False, user=''):
- if privileged and utils.compare_version('1.19', self._version) < 0:
- raise errors.InvalidVersion(
- 'Privileged exec is not supported in API < 1.19'
- )
- if user and utils.compare_version('1.19', self._version) < 0:
+ stdin=False, tty=False, privileged=False, user='',
+ environment=None, workdir=None, detach_keys=None):
+ """
+ Sets up an exec instance in a running container.
+
+ Args:
+ container (str): Target container where exec instance will be
+ created
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+ detach_keys (str): Override the key sequence for detaching
+ a container. Format is a single character `[a-Z]`
+ or `ctrl-<value>` where `<value>` is one of:
+ `a-z`, `@`, `^`, `[`, `,` or `_`.
+ ~/.docker/config.json is used by default.
+
+ Returns:
+ (dict): A dictionary with an exec ``Id`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
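+
+ Example:
+
+ A minimal sketch (assumes ``container`` is a running container;
+ the output is read with ``exec_start``):
+
+ >>> exec_id = cli.exec_create(container, cmd='ls /tmp')
+ >>> cli.exec_start(exec_id)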
+ """
+
+ if environment is not None and utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
- 'User-specific exec is not supported in API < 1.19'
+ 'Setting environment for exec is not supported in API < 1.25'
)
+
if isinstance(cmd, six.string_types):
cmd = utils.split_command(cmd)
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
data = {
'Container': container,
'User': user,
@@ -28,22 +59,55 @@ class ExecApiMixin(object):
'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
- 'Cmd': cmd
+ 'Cmd': cmd,
+ 'Env': environment,
}
+ if workdir is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'workdir is not supported for API version < 1.35'
+ )
+ data['WorkingDir'] = workdir
+
+ if detach_keys:
+ data['detachKeys'] = detach_keys
+ elif 'detachKeys' in self._general_configs:
+ data['detachKeys'] = self._general_configs['detachKeys']
+
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
- @utils.minimum_version('1.16')
def exec_inspect(self, exec_id):
+ """
+ Return low-level information about an exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+
+ Returns:
+ (dict): Dictionary of values returned by the endpoint.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
- @utils.minimum_version('1.15')
def exec_resize(self, exec_id, height=None, width=None):
+ """
+ Resize the tty session used by the specified exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ height (int): Height of tty session
+ width (int): Width of tty session
+ """
+
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@@ -52,12 +116,31 @@ class ExecApiMixin(object):
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.minimum_version('1.15')
+ @utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
+ """
+ Start a previously set up exec instance.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ stream (bool): Stream response data. Default: False
+ socket (bool): Return the connection socket to allow custom
+ read/write operations.
+
+ Returns:
+ (generator or str): If ``stream=True``, a generator yielding
+ response chunks. If ``socket=True``, a socket object for the
+ connection. A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
# we want opened socket if socket == True
- if isinstance(exec_id, dict):
- exec_id = exec_id.get('Id')
data = {
'Tty': tty,
@@ -75,7 +158,8 @@ class ExecApiMixin(object):
data=data,
stream=True
)
-
+ if detach:
+ return self._result(res)
if socket:
return self._get_raw_response_socket(res)
- return self._read_from_socket(res, stream)
+ return self._read_from_socket(res, stream, tty)
diff --git a/docker/api/image.py b/docker/api/image.py
index 7f25f9d..5f05d88 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,35 +1,84 @@
import logging
import os
+
import six
-import warnings
-from ..auth import auth
-from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from .. import utils
-from .. import errors
+from .. import auth, errors, utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
class ImageApiMixin(object):
- @utils.check_resource
- def get_image(self, image):
+ @utils.check_resource('image')
+ def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ image (str): Image name to get
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): A stream of raw archive data.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image:
+ ... f.write(chunk)
+ >>> f.close()
+ """
res = self._get(self._url("/images/{0}/get", image), stream=True)
- self._raise_for_status(res)
- return res.raw
+ return self._stream_raw_result(res, chunk_size, False)
- @utils.check_resource
+ @utils.check_resource('image')
def history(self, image):
+ """
+ Show the history of an image.
+
+ Args:
+ image (str): The image to show history for
+
+ Returns:
+ (list): The history of the image as a list of dicts
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
- def images(self, name=None, quiet=False, all=False, viz=False,
- filters=None):
- if viz:
- if utils.compare_version('1.7', self._version) >= 0:
- raise Exception('Viz output is not supported in API >= 1.7!')
- return self._result(self._get(self._url("images/viz")))
+ def images(self, name=None, quiet=False, all=False, filters=None):
+ """
+ List images. Similar to the ``docker images`` command.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ quiet (bool): Only return numeric IDs as a list.
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (dict or list): A list if ``quiet=True``, otherwise a dict.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
@@ -45,6 +94,25 @@ class ImageApiMixin(object):
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
+ """
+ Import an image. Similar to the ``docker import`` command.
+
+ If ``src`` is a string or unicode string, it will first be treated as a
+ path to a tarball on the local system. If there is an error reading
+ from that file, ``src`` will be treated as a URL instead to fetch the
+ image from. You can also pass an open file handle as ``src``, in which
+ case the data will be read from that file.
+
+ If ``src`` is unset but ``image`` is set, the ``image`` parameter will
+ be taken as the name of an existing image to import from.
+
+ Args:
+ src (str or file): Path to tarfile, URL, or file-like object
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ image (str): Use another image like the ``FROM`` Dockerfile
+ parameter
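+
+ Example:
+
+ A minimal sketch (the tarball path is a placeholder):
+
+ >>> cli.import_image(src='/tmp/rootfs.tar',
+ repository='myuser/rootfs', tag='latest')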
+ """
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
@@ -78,6 +146,16 @@ class ImageApiMixin(object):
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
+ allows importing in-memory bytes data.
+
+ Args:
+ data (bytes collection): Bytes collection containing valid tar data
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
@@ -88,12 +166,22 @@ class ImageApiMixin(object):
u, data=data, params=params, headers=headers, timeout=None
)
)
- return self.import_image(
- src=data, repository=repository, tag=tag, changes=changes
- )
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a tar file on disk.
+
+ Args:
+ filename (str): Full path to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+
+ Raises:
+ IOError: File does not exist.
+ """
+
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
@@ -107,47 +195,177 @@ class ImageApiMixin(object):
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a URL.
+
+ Args:
+ url (str): A URL pointing to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from another image, like the ``FROM`` Dockerfile
+ parameter.
+
+ Args:
+ image (str): Image name to import from
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
- @utils.check_resource
- def insert(self, image, url, path):
- if utils.compare_version('1.12', self._version) >= 0:
- raise errors.DeprecatedMethod(
- 'insert is not available for API version >=1.12'
- )
- api_url = self._url("/images/{0}/insert", image)
- params = {
- 'url': url,
- 'path': path
- }
- return self._result(self._post(api_url, params=params))
-
- @utils.check_resource
+ @utils.check_resource('image')
def inspect_image(self, image):
+ """
+ Get detailed information about an image. Similar to the ``docker
+ inspect`` command, but only for images.
+
+ Args:
+ image (str): The image to inspect
+
+ Returns:
+ (dict): Similar to the output of ``docker inspect``, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
- def load_image(self, data):
- res = self._post(self._url("/images/load"), data=data)
- self._raise_for_status(res)
+ @utils.minimum_version('1.30')
+ @utils.check_resource('image')
+ def inspect_distribution(self, image):
+ """
+ Get image digest and platform information by contacting the registry.
- def pull(self, repository, tag=None, stream=False,
- insecure_registry=False, auth_config=None, decode=False):
- if insecure_registry:
- warnings.warn(
- INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
- DeprecationWarning
- )
+ Args:
+ image (str): The image name to inspect
+ Returns:
+ (dict): A dict containing distribution data
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self._result(
+ self._get(self._url("/distribution/{0}/json", image)), True
+ )
+
+ def load_image(self, data, quiet=None):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
+ save``). Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+ quiet (boolean): Suppress progress details in response.
+
+ Returns:
+ (generator): Progress output as JSON objects. Only available for
+ API version >= 1.23
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
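+
+ Example:
+
+ A minimal sketch (the tarball path is a placeholder; progress
+ output requires API version >= 1.23):
+
+ >>> with open('/tmp/busybox-latest.tar', 'rb') as f:
+ ... for line in cli.load_image(f.read()):
+ ... print(line)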
+ """
+ params = {}
+
+ if quiet is not None:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'quiet is not supported in API version < 1.23'
+ )
+ params['quiet'] = quiet
+
+ res = self._post(
+ self._url("/images/load"), data=data, params=params, stream=True
+ )
+ if utils.version_gte(self._version, '1.23'):
+ return self._stream_helper(res, decode=True)
+
+ self._raise_for_status(res)
+
+ @utils.minimum_version('1.25')
+ def prune_images(self, filters=None):
+ """
+ Delete unused images
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+ Available filters:
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
+
+ Returns:
+ (dict): A dict containing a list of deleted image IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/images/prune")
+ params = {}
+ if filters is not None:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._post(url, params=params), True)
+
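A quick hedged example (output values invented):

    >>> cli.prune_images(filters={'dangling': True})
    {'ImagesDeleted': [{'Deleted': 'sha256:...'}], 'SpaceReclaimed': 65536}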
+ def pull(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False, platform=None):
+ """
+ Pulls an image. Similar to the ``docker pull`` command.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ stream (bool): Stream the output as a generator
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+
+ Returns:
+ (generator or str): The output
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for line in cli.pull('busybox', stream=True):
+ ... print(json.dumps(json.loads(line), indent=4))
+ {
+ "status": "Pulling image (latest) from busybox",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+ {
+ "status": "Pulling image (latest) from busybox, endpoint: ...",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+
+ """
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
@@ -158,14 +376,20 @@ class ImageApiMixin(object):
}
headers = {}
- if utils.compare_version('1.5', self._version) >= 0:
- if auth_config is None:
- header = auth.get_config_header(self, registry)
- if header:
- headers['X-Registry-Auth'] = header
- else:
- log.debug('Sending supplied auth config')
- headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
response = self._post(
self._url('/images/create'), params=params, headers=headers,
@@ -179,14 +403,40 @@ class ImageApiMixin(object):
return self._result(response)
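For the new ``platform`` parameter, a hedged sketch (requires API 1.32 or later; platform string illustrative):

    >>> cli.pull('busybox', tag='latest', platform='linux/arm64')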
- def push(self, repository, tag=None, stream=False,
- insecure_registry=False, auth_config=None, decode=False):
- if insecure_registry:
- warnings.warn(
- INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
- DeprecationWarning
- )
-
+ def push(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False):
+ """
+ Push an image or a repository to the registry. Similar to the ``docker
+ push`` command.
+
+ Args:
+ repository (str): The repository to push to
+ tag (str): An optional tag to push
+ stream (bool): Stream the output as a blocking generator
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+
+ Returns:
+ (generator or str): The output from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ >>> for line in cli.push('yourname/app', stream=True):
+            ... print(line)
+ {"status":"Pushing repository yourname/app (1 tags)"}
+ {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
+ {"status":"Image already pushed, skipping","progressDetail":{},
+ "id":"511136ea3c5a"}
+ ...
+
+ """
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
@@ -196,14 +446,13 @@ class ImageApiMixin(object):
}
headers = {}
- if utils.compare_version('1.5', self._version) >= 0:
- if auth_config is None:
- header = auth.get_config_header(self, registry)
- if header:
- headers['X-Registry-Auth'] = header
- else:
- log.debug('Sending supplied auth config')
- headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
@@ -216,20 +465,63 @@ class ImageApiMixin(object):
return self._result(response)
- @utils.check_resource
+ @utils.check_resource('image')
def remove_image(self, image, force=False, noprune=False):
+ """
+ Remove an image. Similar to the ``docker rmi`` command.
+
+ Args:
+ image (str): The image to remove
+ force (bool): Force removal of the image
+ noprune (bool): Do not delete untagged parents
+ """
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
- self._raise_for_status(res)
+ return self._result(res, True)
def search(self, term):
+ """
+ Search for images on Docker Hub. Similar to the ``docker search``
+ command.
+
+ Args:
+ term (str): A term to search for.
+
+ Returns:
+ (list of dicts): The response of the search.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
)
- @utils.check_resource
+ @utils.check_resource('image')
def tag(self, image, repository, tag=None, force=False):
+ """
+ Tag an image into a repository. Similar to the ``docker tag`` command.
+
+ Args:
+ image (str): The image to tag
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+            force (bool): Force the tag, replacing an existing tag with the
+                same name if present
+
+ Returns:
+ (bool): ``True`` if successful
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ force=True)
+ """
params = {
'tag': tag,
'repo': repository,
diff --git a/docker/api/network.py b/docker/api/network.py
index 0ee0dab..57ed8d3 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -1,29 +1,97 @@
-import json
-
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
+from .. import utils
class NetworkApiMixin(object):
- @minimum_version('1.21')
- def networks(self, names=None, ids=None):
- filters = {}
+ def networks(self, names=None, ids=None, filters=None):
+ """
+        List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by
+ ids (:py:class:`list`): List of ids to filter by
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+
+ Returns:
+            (list): List of network objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ if filters is None:
+ filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
-
- params = {'filters': json.dumps(filters)}
-
+ params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
- @minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
- enable_ipv6=False):
+ enable_ipv6=False, attachable=None, scope=None,
+ ingress=None):
+ """
+        Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+ check_duplicate (bool): Request daemon to check for networks with
+ same name. Default: ``None``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (dict): The created network reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.create_network("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+            >>> client.create_network("network1", driver="bridge",
+                                      ipam=ipam_config)
+ """
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@@ -32,7 +100,7 @@ class NetworkApiMixin(object):
'Driver': driver,
'Options': options,
'IPAM': ipam,
- 'CheckDuplicate': check_duplicate
+ 'CheckDuplicate': check_duplicate,
}
if labels is not None:
@@ -57,28 +125,116 @@ class NetworkApiMixin(object):
'supported in API version < 1.22')
data['Internal'] = True
+ if attachable is not None:
+ if version_lt(self._version, '1.24'):
+ raise InvalidVersion(
+ 'attachable is not supported in API version < 1.24'
+ )
+ data['Attachable'] = attachable
+
+ if ingress is not None:
+ if version_lt(self._version, '1.29'):
+ raise InvalidVersion(
+ 'ingress is not supported in API version < 1.29'
+ )
+
+ data['Ingress'] = ingress
+
+ if scope is not None:
+ if version_lt(self._version, '1.30'):
+ raise InvalidVersion(
+ 'scope is not supported in API version < 1.30'
+ )
+ data['Scope'] = scope
+
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
- @minimum_version('1.21')
+ @minimum_version('1.25')
+ def prune_networks(self, filters=None):
+ """
+ Delete unused networks
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted network names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/networks/prune')
+ return self._result(self._post(url, params=params), True)
+
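A hedged example (``until`` is one of the filters the Engine's prune endpoints accept; output invented):

    >>> cli.prune_networks(filters={'until': '24h'})
    {'NetworksDeleted': ['network1']}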
+ @check_resource('net_id')
def remove_network(self, net_id):
+ """
+ Remove a network. Similar to the ``docker network rm`` command.
+
+ Args:
+ net_id (str): The network's id
+ """
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
- @minimum_version('1.21')
- def inspect_network(self, net_id):
+ @check_resource('net_id')
+ def inspect_network(self, net_id, verbose=None, scope=None):
+ """
+ Get detailed information about a network.
+
+ Args:
+ net_id (str): ID of network
+ verbose (bool): Show the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
+ """
+ params = {}
+ if verbose is not None:
+ if version_lt(self._version, '1.28'):
+ raise InvalidVersion('verbose was introduced in API 1.28')
+ params['verbose'] = verbose
+ if scope is not None:
+ if version_lt(self._version, '1.31'):
+ raise InvalidVersion('scope was introduced in API 1.31')
+ params['scope'] = scope
+
url = self._url("/networks/{0}", net_id)
- res = self._get(url)
+ res = self._get(url, params=params)
return self._result(res, json=True)
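A short sketch of the new parameters (assumes a swarm-scoped network named ``ingress``; ``verbose`` needs API 1.28+, ``scope`` needs 1.31+):

    >>> net = cli.inspect_network('ingress', verbose=True, scope='swarm')
    >>> net['Scope']
    'swarm'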
- @check_resource
- @minimum_version('1.21')
+ @check_resource('container')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
+ """
+ Connect a container to a network.
+
+ Args:
+ container (str): container-id/name to be connected to the network
+ net_id (str): network id
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local
+ (IPv4/IPv6) addresses.
+ """
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
@@ -91,10 +247,19 @@ class NetworkApiMixin(object):
res = self._post_json(url, data=data)
self._raise_for_status(res)
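A hedged sketch (container and network names invented; the static IP presumes the network's IPAM pool covers it):

    >>> cli.connect_container_to_network(
    ...     'web', 'backend', aliases=['web'],
    ...     ipv4_address='192.168.52.2'
    ... )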
- @check_resource
- @minimum_version('1.21')
+ @check_resource('container')
def disconnect_container_from_network(self, container, net_id,
force=False):
+ """
+ Disconnect a container from a network.
+
+ Args:
+ container (str): container ID or name to be disconnected from the
+ network
+ net_id (str): network ID
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+ """
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
new file mode 100644
index 0000000..73f1852
--- /dev/null
+++ b/docker/api/plugin.py
@@ -0,0 +1,251 @@
+import six
+
+from .. import auth, utils
+
+
+class PluginApiMixin(object):
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def configure_plugin(self, name, options):
+ """
+ Configure a plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ options (dict): A key-value mapping of options
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/set', name)
+ data = options
+ if isinstance(data, dict):
+ data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+ return True
+
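A minimal sketch (plugin name and option invented):

    >>> cli.configure_plugin('vieux/sshfs:latest', {'DEBUG': '1'})
    True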
+ @utils.minimum_version('1.25')
+ def create_plugin(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/create')
+
+ with utils.create_archive(root=plugin_data_dir, gzip=gzip) as archv:
+ res = self._post(url, params={'name': name}, data=archv)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def disable_plugin(self, name):
+ """
+ Disable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/disable', name)
+ res = self._post(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def enable_plugin(self, name, timeout=0):
+ """
+ Enable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ timeout (int): Operation timeout (in seconds). Default: 0
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/enable', name)
+ params = {'timeout': timeout}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def inspect_plugin(self, name):
+ """
+ Retrieve plugin metadata.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ A dict containing plugin info
+ """
+ url = self._url('/plugins/{0}/json', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def pull_plugin(self, remote, privileges, name=None):
+ """
+ Pull and install a plugin. After the plugin is installed, it can be
+ enabled using :py:meth:`~enable_plugin`.
+
+ Args:
+ remote (string): Remote reference for the plugin to install.
+ The ``:latest`` tag is optional, and is the default if
+ omitted.
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+ name (string): Local name for the pulled plugin. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+ url = self._url('/plugins/pull')
+ params = {
+ 'remote': remote,
+ }
+ if name:
+ params['name'] = name
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
+
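The intended install flow, sketched with an assumed plugin name: fetch the privileges first, grant them on pull, then enable:

    >>> privileges = cli.plugin_privileges('vieux/sshfs:latest')
    >>> for line in cli.pull_plugin('vieux/sshfs:latest', privileges):
    ...     print(line)
    >>> cli.enable_plugin('vieux/sshfs:latest')
    True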
+ @utils.minimum_version('1.25')
+ def plugins(self):
+ """
+ Retrieve a list of installed plugins.
+
+ Returns:
+ A list of dicts, one per plugin
+ """
+ url = self._url('/plugins')
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def plugin_privileges(self, name):
+ """
+ Retrieve list of privileges to be granted to a plugin.
+
+ Args:
+ name (string): Name of the remote plugin to examine. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ A list of dictionaries representing the plugin's
+ permissions
+
+ """
+ params = {
+ 'remote': name,
+ }
+
+ url = self._url('/plugins/privileges')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def push_plugin(self, name):
+ """
+ Push a plugin to the registry.
+
+ Args:
+ name (string): Name of the plugin to upload. The ``:latest``
+ tag is optional, and is the default if omitted.
+
+ Returns:
+ ``True`` if successful
+ """
+        url = self._url('/plugins/{0}/push', name)
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ res = self._post(url, headers=headers)
+ self._raise_for_status(res)
+ return self._stream_helper(res, decode=True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def remove_plugin(self, name, force=False):
+ """
+ Remove an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to remove. The ``:latest``
+ tag is optional, and is the default if omitted.
+ force (bool): Disable the plugin before removing. This may
+ result in issues if the plugin is in use by a container.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}', name)
+ res = self._delete(url, params={'force': force})
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.26')
+ @utils.check_resource('name')
+ def upgrade_plugin(self, name, remote, privileges):
+ """
+ Upgrade an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to upgrade. The ``:latest``
+ tag is optional and is the default if omitted.
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+
+ url = self._url('/plugins/{0}/upgrade', name)
+ params = {
+ 'remote': remote,
+ }
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
diff --git a/docker/api/secret.py b/docker/api/secret.py
new file mode 100644
index 0000000..fa4c2ab
--- /dev/null
+++ b/docker/api/secret.py
@@ -0,0 +1,102 @@
+import base64
+
+import six
+
+from .. import errors
+from .. import utils
+
+
+class SecretApiMixin(object):
+ @utils.minimum_version('1.25')
+ def create_secret(self, name, data, labels=None, driver=None):
+ """
+ Create a secret
+
+ Args:
+ name (string): Name of the secret
+ data (bytes): Secret data to be stored
+ labels (dict): A mapping of labels to assign to the secret
+ driver (DriverConfig): A custom driver configuration. If
+ unspecified, the default ``internal`` driver will be used
+
+ Returns (dict): ID of the newly created secret
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ if driver is not None:
+ if utils.version_lt(self._version, '1.31'):
+ raise errors.InvalidVersion(
+                    'Secret driver is only available for API version >= 1.31'
+ )
+
+ body['Driver'] = driver
+
+ url = self._url('/secrets/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
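A hedged sketch (name, data and labels invented; output abbreviated):

    >>> cli.create_secret(
    ...     name='db_password', data=b's3cr3t', labels={'env': 'prod'}
    ... )
    {'ID': '...'}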
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def inspect_secret(self, id):
+ """
+ Retrieve secret metadata
+
+ Args:
+            id (string): Full ID of the secret to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def remove_secret(self, id):
+ """
+ Remove a secret
+
+ Args:
+ id (string): Full ID of the secret to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def secrets(self, filters=None):
+ """
+ List secrets
+
+ Args:
+ filters (dict): A map of filters to process on the secrets
+ list. Available filters: ``names``
+
+ Returns (list): A list of secrets
+ """
+ url = self._url('/secrets')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/docker/api/service.py b/docker/api/service.py
index baebbad..03b0ca6 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -1,14 +1,137 @@
-from .. import errors
-from .. import utils
-from ..auth import auth
+from .. import auth, errors, utils
+from ..types import ServiceMode
+
+
+def _check_api_features(version, task_template, update_config, endpoint_spec):
+
+ def raise_version_error(param, min_version):
+ raise errors.InvalidVersion(
+ '{} is not supported in API version < {}'.format(
+ param, min_version
+ )
+ )
+
+ if update_config is not None:
+ if utils.version_lt(version, '1.25'):
+ if 'MaxFailureRatio' in update_config:
+ raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
+ if 'Monitor' in update_config:
+ raise_version_error('UpdateConfig.monitor', '1.25')
+
+ if utils.version_lt(version, '1.29'):
+ if 'Order' in update_config:
+ raise_version_error('UpdateConfig.order', '1.29')
+
+ if endpoint_spec is not None:
+ if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
+ if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
+ raise_version_error('EndpointSpec.Ports[].mode', '1.32')
+
+ if task_template is not None:
+ if 'ForceUpdate' in task_template and utils.version_lt(
+ version, '1.25'):
+ raise_version_error('force_update', '1.25')
+
+ if task_template.get('Placement'):
+ if utils.version_lt(version, '1.30'):
+ if task_template['Placement'].get('Platforms'):
+ raise_version_error('Placement.platforms', '1.30')
+ if utils.version_lt(version, '1.27'):
+ if task_template['Placement'].get('Preferences'):
+ raise_version_error('Placement.preferences', '1.27')
+
+ if task_template.get('ContainerSpec'):
+ container_spec = task_template.get('ContainerSpec')
+
+ if utils.version_lt(version, '1.25'):
+ if container_spec.get('TTY'):
+ raise_version_error('ContainerSpec.tty', '1.25')
+ if container_spec.get('Hostname') is not None:
+ raise_version_error('ContainerSpec.hostname', '1.25')
+ if container_spec.get('Hosts') is not None:
+ raise_version_error('ContainerSpec.hosts', '1.25')
+ if container_spec.get('Groups') is not None:
+ raise_version_error('ContainerSpec.groups', '1.25')
+ if container_spec.get('DNSConfig') is not None:
+ raise_version_error('ContainerSpec.dns_config', '1.25')
+ if container_spec.get('Healthcheck') is not None:
+ raise_version_error('ContainerSpec.healthcheck', '1.25')
+
+ if utils.version_lt(version, '1.28'):
+ if container_spec.get('ReadOnly') is not None:
+                    raise_version_error('ContainerSpec.read_only', '1.28')
+ if container_spec.get('StopSignal') is not None:
+ raise_version_error('ContainerSpec.stop_signal', '1.28')
+
+ if utils.version_lt(version, '1.30'):
+ if container_spec.get('Configs') is not None:
+ raise_version_error('ContainerSpec.configs', '1.30')
+ if container_spec.get('Privileges') is not None:
+ raise_version_error('ContainerSpec.privileges', '1.30')
+
+ if utils.version_lt(version, '1.35'):
+ if container_spec.get('Isolation') is not None:
+ raise_version_error('ContainerSpec.isolation', '1.35')
+
+ if task_template.get('Resources'):
+ if utils.version_lt(version, '1.32'):
+ if task_template['Resources'].get('GenericResources'):
+ raise_version_error('Resources.generic_resources', '1.32')
+
+
+def _merge_task_template(current, override):
+ merged = current.copy()
+ if override is not None:
+ for ts_key, ts_value in override.items():
+ if ts_key == 'ContainerSpec':
+ if 'ContainerSpec' not in merged:
+ merged['ContainerSpec'] = {}
+ for cs_key, cs_value in override['ContainerSpec'].items():
+ if cs_value is not None:
+ merged['ContainerSpec'][cs_key] = cs_value
+ elif ts_value is not None:
+ merged[ts_key] = ts_value
+ return merged
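To make the merge semantics concrete, a hedged example (values illustrative; note that ``ContainerSpec`` is merged key by key while other task-template keys are replaced wholesale):

    >>> current = {'ContainerSpec': {'Image': 'nginx:1.13', 'TTY': True}}
    >>> override = {'ContainerSpec': {'Image': 'nginx:1.14'}, 'ForceUpdate': 1}
    >>> _merge_task_template(current, override)
    {'ContainerSpec': {'Image': 'nginx:1.14', 'TTY': True}, 'ForceUpdate': 1}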
class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
- update_config=None, networks=None, endpoint_config=None
+ update_config=None, networks=None, endpoint_config=None,
+ endpoint_spec=None
):
+ """
+ Create a service.
+
+ Args:
+ task_template (TaskTemplate): Specification of the task to start as
+ part of the new service.
+ name (string): User-defined name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+ A dictionary containing an ``ID`` key for the newly created
+ service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec
+ )
+
url = self._url('/services/create')
headers = {}
image = task_template.get('ContainerSpec', {}).get('Image', None)
@@ -16,38 +139,96 @@ class ServiceApiMixin(object):
raise errors.DockerException(
'Missing mandatory Image key in ContainerSpec'
)
+ if mode and not isinstance(mode, dict):
+ mode = ServiceMode(mode)
+
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
+ if utils.version_lt(self._version, '1.25'):
+ networks = networks or task_template.pop('Networks', None)
data = {
'Name': name,
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
- 'UpdateConfig': update_config,
- 'Networks': networks,
- 'Endpoint': endpoint_config
+ 'Networks': utils.convert_service_networks(networks),
+ 'EndpointSpec': endpoint_spec
}
+
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+
return self._result(
self._post_json(url, data=data, headers=headers), True
)
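Putting the types together, a hedged sketch (assumes ``import docker`` and an ``APIClient`` instance ``cli``; output abbreviated):

    >>> container_spec = docker.types.ContainerSpec(image='nginx:alpine')
    >>> task_template = docker.types.TaskTemplate(container_spec)
    >>> cli.create_service(task_template, name='web')
    {'ID': '...'}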
@utils.minimum_version('1.24')
- @utils.check_resource
- def inspect_service(self, service):
+ @utils.check_resource('service')
+ def inspect_service(self, service, insert_defaults=None):
+ """
+ Return information about a service.
+
+ Args:
+ service (str): Service name or ID.
+ insert_defaults (boolean): If true, default values will be merged
+ into the service inspect output.
+
+ Returns:
+            (dict): A dictionary of the server-side representation of the
+                service, including all relevant properties.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/services/{0}', service)
- return self._result(self._get(url), True)
+ params = {}
+ if insert_defaults is not None:
+ if utils.version_lt(self._version, '1.29'):
+ raise errors.InvalidVersion(
+ 'insert_defaults is not supported in API version < 1.29'
+ )
+ params['insertDefaults'] = insert_defaults
+
+ return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('task')
def inspect_task(self, task):
+ """
+ Retrieve information about a task.
+
+ Args:
+ task (str): Task ID
+
+ Returns:
+ (dict): Information about the task.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('service')
def remove_service(self, service):
+ """
+ Stop and remove a service.
+
+ Args:
+ service (str): Service name or ID
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
@@ -55,14 +236,95 @@ class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def services(self, filters=None):
+ """
+ List services.
+
+ Args:
+            filters (dict): Filters to process on the services list. Valid
+                filters: ``id``, ``name``, ``label`` and ``mode``.
+ Default: ``None``.
+
+ Returns:
+ A list of dictionaries containing data about each service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/services')
return self._result(self._get(url, params=params), True)
+ @utils.minimum_version('1.25')
+ @utils.check_resource('service')
+ def service_logs(self, service, details=False, follow=False, stdout=False,
+ stderr=False, since=0, timestamps=False, tail='all',
+ is_tty=None):
+ """
+ Get log stream for a service.
+ Note: This endpoint works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ service (str): ID or name of the service
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+            since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+ is_tty (bool): Whether the service's :py:class:`ContainerSpec`
+ enables the TTY option. If omitted, the method will query
+ the Engine for the information, causing an additional
+ roundtrip.
+
+ Returns (generator): Logs for the service.
+ """
+ params = {
+ 'details': details,
+ 'follow': follow,
+ 'stdout': stdout,
+ 'stderr': stderr,
+ 'since': since,
+ 'timestamps': timestamps,
+ 'tail': tail
+ }
+
+ url = self._url('/services/{0}/logs', service)
+ res = self._get(url, params=params, stream=True)
+ if is_tty is None:
+ is_tty = self.inspect_service(
+ service
+ )['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
+ return self._get_result_tty(True, res, is_tty)
+
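A hedged sketch (service name invented; ``is_tty`` is passed explicitly to skip the extra inspect round-trip):

    >>> for line in cli.service_logs('web', stdout=True, stderr=True,
    ...                              tail=50, is_tty=False):
    ...     print(line)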
@utils.minimum_version('1.24')
def tasks(self, filters=None):
+ """
+ Retrieve a list of tasks.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``service``, ``node``,
+ ``label`` and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
params = {
'filters': utils.convert_filters(filters) if filters else None
}
@@ -70,33 +332,110 @@ class ServiceApiMixin(object):
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('service')
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
- networks=None, endpoint_config=None):
+ networks=None, endpoint_config=None,
+ endpoint_spec=None, fetch_current_spec=False):
+ """
+ Update a service.
+
+ Args:
+ service (string): A service identifier (either its name or service
+ ID).
+ version (int): The version number of the service object being
+ updated. This is required to avoid conflicting writes.
+ task_template (TaskTemplate): Specification of the updated task to
+ start as part of the service.
+ name (string): New name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``.
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+            fetch_current_spec (boolean): Fill settings left undefined here
+                with values from the current specification of the service.
+                Default: ``False``
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec
+ )
+
+ if fetch_current_spec:
+ inspect_defaults = True
+ if utils.version_lt(self._version, '1.29'):
+ inspect_defaults = None
+ current = self.inspect_service(
+ service, insert_defaults=inspect_defaults
+ )['Spec']
+
+ else:
+ current = {}
+
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
- if name is not None:
- data['Name'] = name
- if labels is not None:
- data['Labels'] = labels
+
+ data['Name'] = current.get('Name') if name is None else name
+
+ data['Labels'] = current.get('Labels') if labels is None else labels
+
if mode is not None:
+ if not isinstance(mode, dict):
+ mode = ServiceMode(mode)
data['Mode'] = mode
- if task_template is not None:
- image = task_template.get('ContainerSpec', {}).get('Image', None)
- if image is not None:
- registry, repo_name = auth.resolve_repository_name(image)
- auth_header = auth.get_config_header(self, registry)
- if auth_header:
- headers['X-Registry-Auth'] = auth_header
- data['TaskTemplate'] = task_template
+ else:
+ data['Mode'] = current.get('Mode')
+
+ data['TaskTemplate'] = _merge_task_template(
+ current.get('TaskTemplate', {}), task_template
+ )
+
+ container_spec = data['TaskTemplate'].get('ContainerSpec', {})
+ image = container_spec.get('Image', None)
+ if image is not None:
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+
if update_config is not None:
data['UpdateConfig'] = update_config
+ else:
+ data['UpdateConfig'] = current.get('UpdateConfig')
+
if networks is not None:
- data['Networks'] = networks
- if endpoint_config is not None:
- data['Endpoint'] = endpoint_config
+ converted_networks = utils.convert_service_networks(networks)
+ if utils.version_lt(self._version, '1.25'):
+ data['Networks'] = converted_networks
+ else:
+ data['TaskTemplate']['Networks'] = converted_networks
+ elif utils.version_lt(self._version, '1.25'):
+ data['Networks'] = current.get('Networks')
+ elif data['TaskTemplate'].get('Networks') is None:
+ current_task_template = current.get('TaskTemplate', {})
+ current_networks = current_task_template.get('Networks')
+ if current_networks is None:
+ current_networks = current.get('Networks')
+ if current_networks is not None:
+ data['TaskTemplate']['Networks'] = current_networks
+
+ if endpoint_spec is not None:
+ data['EndpointSpec'] = endpoint_spec
+ else:
+ data['EndpointSpec'] = current.get('EndpointSpec')
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index d099364..04595da 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,16 +1,121 @@
-from .. import utils
import logging
+from six.moves import http_client
+from .. import errors
+from .. import types
+from .. import utils
+
log = logging.getLogger(__name__)
class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
- return utils.SwarmSpec(*args, **kwargs)
+ """
+ Create a :py:class:`docker.types.SwarmSpec` instance that can be used
+ as the ``swarm_spec`` argument in
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
+
+ Args:
+ task_history_retention_limit (int): Maximum number of tasks
+ history stored.
+            snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+            heartbeat_tick (int): Number of ticks (in seconds) between each
+                heartbeat.
+            election_tick (int): Number of ticks (in seconds) needed without a
+                leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for nodes certificates.
+ external_cas (:py:class:`list`): Configuration for forwarding
+ signing requests to an external certificate authority. Use
+ a list of :py:class:`docker.types.SwarmExternalCA`.
+ name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer whose purpose is to force swarm
+ to generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
+
+ Returns:
+ :py:class:`docker.types.SwarmSpec`
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> spec = client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ >>> client.init_swarm(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, swarm_spec=spec
+ )
+ """
+ ext_ca = kwargs.pop('external_ca', None)
+ if ext_ca:
+ kwargs['external_cas'] = [ext_ca]
+ return types.SwarmSpec(self._version, *args, **kwargs)
+
+ @utils.minimum_version('1.24')
+ def get_unlock_key(self):
+ """
+ Get the unlock key for this Swarm manager.
+
+ Returns:
+ A ``dict`` containing an ``UnlockKey`` member
+ """
+ return self._result(self._get(self._url('/swarm/unlockkey')), True)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None):
+ """
+ Initialize a new Swarm using the current connected engine as the first
+ node.
+
+ Args:
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: None
+ listen_addr (string): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: '0.0.0.0:2377'
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ swarm_spec (dict): Configuration settings of the new Swarm. Use
+ ``APIClient.create_swarm_spec`` to generate a valid
+ configuration. Default: None
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
@@ -26,18 +131,67 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def inspect_swarm(self):
+ """
+ Retrieve low-level information about the current swarm.
+
+ Returns:
+ A dictionary containing data about the swarm.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/swarm')
return self._result(self._get(url), True)
- @utils.check_resource
+ @utils.check_resource('node_id')
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
+ """
+ Retrieve low-level information about a swarm node
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A dictionary containing data about this node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
- def join_swarm(self, remote_addrs, join_token, listen_addr=None,
+ def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
advertise_addr=None):
+ """
+ Make this Engine join a swarm that has already been created.
+
+ Args:
+ remote_addrs (:py:class:`list`): Addresses of one or more manager
+ nodes already participating in the Swarm to join.
+ join_token (string): Secret token for joining this Swarm.
+ listen_addr (string): Listen address used for inter-manager
+ communication if the node gets promoted to manager, as well as
+ determining the networking interface used for the VXLAN Tunnel
+                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ AdvertiseAddr is not specified, it will be automatically
+ detected when possible. Default: ``None``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
data = {
"RemoteAddrs": remote_addrs,
"ListenAddr": listen_addr,
@@ -51,13 +205,49 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
+ """
+ Leave a swarm.
+
+ Args:
+ force (bool): Leave the swarm even if this node is a manager.
+ Default: ``False``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
+ # Ignore "this node is not part of a swarm" error
+ if force and response.status_code == http_client.NOT_ACCEPTABLE:
+ return True
+ # FIXME: Temporary workaround for 1.13.0-rc bug
+ # https://github.com/docker/docker/issues/29192
+ if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
+ return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of dictionaries containing data about each swarm node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/nodes')
params = {}
if filters:
@@ -65,9 +255,130 @@ class SwarmApiMixin(object):
return self._result(self._get(url, params=params), True)
+ @utils.check_resource('node_id')
+ @utils.minimum_version('1.24')
+ def remove_node(self, node_id, force=False):
+ """
+ Remove a node from the swarm.
+
+ Args:
+ node_id (string): ID of the node to be removed.
+ force (bool): Force remove an active node. Default: `False`
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node referenced doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ Returns:
+ `True` if the request was successful.
+ """
+ url = self._url('/nodes/{0}', node_id)
+ params = {
+ 'force': force
+ }
+ res = self._delete(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def unlock_swarm(self, key):
+ """
+ Unlock a locked swarm.
+
+ Args:
+ key (string): The unlock key as provided by
+ :py:meth:`get_unlock_key`
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the key argument is in an incompatible format
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ `True` if the request was successful.
+
+ Example:
+
+ >>> key = client.get_unlock_key()
+            >>> client.unlock_swarm(key)
+
+ """
+ if isinstance(key, dict):
+ if 'UnlockKey' not in key:
+ raise errors.InvalidArgument('Invalid unlock key format')
+ else:
+ key = {'UnlockKey': key}
+
+ url = self._url('/swarm/unlock')
+ res = self._post_json(url, data=key)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def update_node(self, node_id, version, node_spec=None):
+ """
+ Update the node's configuration
+
+ Args:
+
+ node_id (string): ID of the node to be updated.
+ version (int): The version number of the node object being
+ updated. This is required to avoid conflicting writes.
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ node_spec=node_spec)
+
+ """
+ url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
+ res = self._post_json(url, data=node_spec)
+ self._raise_for_status(res)
+ return True
+
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_manager_token=False):
+ """
+ Update the Swarm's configuration
+
+ Args:
+ version (int): The version number of the swarm object being
+ updated. This is required to avoid conflicting writes.
+ swarm_spec (dict): Configuration settings to update. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
+ generate a valid configuration. Default: ``None``.
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token,
diff --git a/docker/api/volume.py b/docker/api/volume.py
index afc72cb..900a608 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -3,16 +3,70 @@ from .. import utils
class VolumeApiMixin(object):
- @utils.minimum_version('1.21')
def volumes(self, filters=None):
+ """
+ List volumes currently registered by the docker daemon. Similar to the
+ ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (dict): Dictionary with list of volume objects as value of the
+ ``Volumes`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.volumes()
+ {u'Volumes': [{u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'},
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
+ u'Name': u'baz'}]}
+ """
+
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/volumes')
return self._result(self._get(url, params=params), True)
- @utils.minimum_version('1.21')
- def create_volume(self, name, driver=None, driver_opts=None, labels=None):
+ def create_volume(self, name=None, driver=None, driver_opts=None,
+ labels=None):
+ """
+ Create and register a named volume
+
+ Args:
+ name (str): Name of the volume
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (dict): The created volume reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = cli.create_volume(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+ >>> print(volume)
+ {u'Driver': u'local',
+ u'Labels': {u'key': u'value'},
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar',
+ u'Scope': u'local'}
+
+ """
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
@@ -34,13 +88,74 @@ class VolumeApiMixin(object):
return self._result(self._post_json(url, data=data), True)
- @utils.minimum_version('1.21')
def inspect_volume(self, name):
+ """
+ Retrieve volume info by name.
+
+ Args:
+ name (str): volume name
+
+ Returns:
+ (dict): Volume information dictionary
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.inspect_volume('foobar')
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'}
+
+ """
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
- @utils.minimum_version('1.21')
- def remove_volume(self, name):
- url = self._url('/volumes/{0}', name)
+ @utils.minimum_version('1.25')
+ def prune_volumes(self, filters=None):
+ """
+ Delete unused volumes
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted volume names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/volumes/prune')
+ return self._result(self._post(url, params=params), True)
+
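A hedged example (label filter and output invented):

    >>> cli.prune_volumes(filters={'label': 'env=test'})
    {'VolumesDeleted': ['testvol'], 'SpaceReclaimed': 16384}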
+ def remove_volume(self, name, force=False):
+ """
+ Remove a volume. Similar to the ``docker volume rm`` command.
+
+ Args:
+ name (str): The volume's name
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
+ """
+ params = {}
+ if force:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force removal was introduced in API 1.25'
+ )
+ params = {'force': force}
+
+        url = self._url('/volumes/{0}', name)
+        resp = self._delete(url, params=params)
self._raise_for_status(resp)
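With the new ``force`` flag (API 1.25 or later), a minimal sketch (volume name invented):

    >>> cli.remove_volume('foobar', force=True)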
diff --git a/docker/auth/auth.py b/docker/auth.py
index dc0baea..48fcd8b 100644
--- a/docker/auth/auth.py
+++ b/docker/auth.py
@@ -1,17 +1,15 @@
import base64
import json
import logging
-import os
import dockerpycreds
import six
-from .. import errors
+from . import errors
+from .utils import config
INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
-DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
-LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -69,6 +67,15 @@ def split_repo_name(repo_name):
return tuple(parts)
+def get_credential_store(authconfig, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = 'https://index.docker.io/v1/'
+
+ return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
+ 'credsStore'
+ )
+
+
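A hedged illustration of the lookup order — a per-registry ``credHelpers`` entry wins over the global ``credsStore`` (config values invented):

    >>> get_credential_store(
    ...     {'credHelpers': {'myregistry.example.com': 'ecr-login'},
    ...      'credsStore': 'osxkeychain'},
    ...     'myregistry.example.com'
    ... )
    'ecr-login'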
def resolve_authconfig(authconfig, registry=None):
"""
Returns the authentication data from the given auth configuration for a
@@ -76,25 +83,33 @@ def resolve_authconfig(authconfig, registry=None):
with full URLs are stripped down to hostnames before checking for a match.
Returns None if no match was found.
"""
- if 'credsStore' in authconfig:
- log.debug(
- 'Using credentials store "{0}"'.format(authconfig['credsStore'])
- )
- return _resolve_authconfig_credstore(
- authconfig, registry, authconfig['credsStore']
- )
+
+ if 'credHelpers' in authconfig or 'credsStore' in authconfig:
+ store_name = get_credential_store(authconfig, registry)
+ if store_name is not None:
+ log.debug(
+ 'Using credentials store "{0}"'.format(store_name)
+ )
+ cfg = _resolve_authconfig_credstore(
+ authconfig, registry, store_name
+ )
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
- if registry in authconfig:
+ authdict = authconfig.get('auths', {})
+ if registry in authdict:
log.debug("Found {0}".format(repr(registry)))
- return authconfig[registry]
+ return authdict[registry]
- for key, config in six.iteritems(authconfig):
+ for key, conf in six.iteritems(authdict):
if resolve_index_name(key) == registry:
log.debug("Found {0}".format(repr(key)))
- return config
+ return conf
log.debug("No entry found")
return None
@@ -104,7 +119,7 @@ def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
if not registry or registry == INDEX_NAME:
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
- registry = 'https://index.docker.io/v1/'
+ registry = INDEX_URL
log.debug("Looking for auth entry for {0}".format(repr(registry)))
store = dockerpycreds.Store(credstore_name)
try:
@@ -189,7 +204,7 @@ def parse_auth(entries, raise_on_error=False):
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'
+ 'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
@@ -209,34 +224,7 @@ def parse_auth(entries, raise_on_error=False):
return conf
-def find_config_file(config_path=None):
- environment_path = os.path.join(
- os.environ.get('DOCKER_CONFIG'),
- os.path.basename(DOCKER_CONFIG_FILENAME)
- ) if os.environ.get('DOCKER_CONFIG') else None
-
- paths = filter(None, [
- config_path, # 1
- environment_path, # 2
- os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
- os.path.join(
- os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
- ) # 4
- ])
-
- log.debug("Trying paths: {0}".format(repr(paths)))
-
- for path in paths:
- if os.path.exists(path):
- log.debug("Found file at path: {0}".format(path))
- return path
-
- log.debug("No config file found")
-
- return None
-
-
-def load_config(config_path=None):
+def load_config(config_path=None, config_dict=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
@@ -244,36 +232,45 @@ def load_config(config_path=None):
explicit config_path parameter > DOCKER_CONFIG environment variable >
~/.docker/config.json > ~/.dockercfg
"""
- config_file = find_config_file(config_path)
- if not config_file:
- return {}
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return {}
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (IOError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return _load_legacy_config(config_file)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return res
+
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret"
+ "as auth-only file"
+ )
+ return parse_auth(config_dict)
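# Example (editor's sketch, not part of the upstream diff): the new
# config_dict parameter skips file discovery entirely, which is useful when
# the Docker config comes from somewhere other than disk. Illustrative data.
from docker import auth

cfg = auth.load_config(config_dict={
    'auths': {'registry.example.com': {}},  # empty entry: helper-backed
    'credHelpers': {'registry.example.com': 'ecr-login'},
})
print(cfg['credHelpers'])  # {'registry.example.com': 'ecr-login'}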
- try:
- with open(config_file) as f:
- data = json.load(f)
- res = {}
- if data.get('auths'):
- log.debug("Found 'auths' section")
- res.update(parse_auth(data['auths'], raise_on_error=True))
- if data.get('HttpHeaders'):
- log.debug("Found 'HttpHeaders' section")
- res.update({'HttpHeaders': data['HttpHeaders']})
- if data.get('credsStore'):
- log.debug("Found 'credsStore' section")
- res.update({'credsStore': data['credsStore']})
- if res:
- return res
- else:
- log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
- f.seek(0)
- return parse_auth(json.load(f))
- except (IOError, KeyError, ValueError) as e:
- # Likely missing new Docker config file or it's in an
- # unknown format, continue to attempt to read old location
- # and format.
- log.debug(e)
+def _load_legacy_config(config_file):
log.debug("Attempting to parse legacy auth file format")
try:
data = []
diff --git a/docker/auth/__init__.py b/docker/auth/__init__.py
deleted file mode 100644
index 6fc83f8..0000000
--- a/docker/auth/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .auth import (
- INDEX_NAME,
- INDEX_URL,
- encode_header,
- load_config,
- resolve_authconfig,
- resolve_repository_name,
-) # flake8: noqa \ No newline at end of file
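# Editor's note (sketch, not part of the upstream diff): with the auth
# package collapsed into a single module, the helpers previously re-exported
# here are now imported from docker.auth directly; the old docker.auth.auth
# path is gone.
from docker.auth import load_config, resolve_authconfig  # new flat layout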
diff --git a/docker/client.py b/docker/client.py
index 3fa19e0..b4364c3 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,406 +1,203 @@
-import json
-import struct
-from functools import partial
-
-import requests
-import requests.exceptions
-import six
-import websocket
-
-
-from . import api
-from . import constants
-from . import errors
-from .auth import auth
-from .ssladapter import ssladapter
-from .tls import TLSConfig
-from .transport import UnixAdapter
-from .utils import utils, check_resource, update_headers, kwargs_from_env
-from .utils.socket import frames_iter
-try:
- from .transport import NpipeAdapter
-except ImportError:
- pass
-
-
-def from_env(**kwargs):
- return Client.from_env(**kwargs)
-
-
-class Client(
- requests.Session,
- api.BuildApiMixin,
- api.ContainerApiMixin,
- api.DaemonApiMixin,
- api.ExecApiMixin,
- api.ImageApiMixin,
- api.NetworkApiMixin,
- api.ServiceApiMixin,
- api.SwarmApiMixin,
- api.VolumeApiMixin):
- def __init__(self, base_url=None, version=None,
- timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=constants.DEFAULT_USER_AGENT,
- num_pools=constants.DEFAULT_NUM_POOLS):
- super(Client, self).__init__()
-
- if tls and not base_url:
- raise errors.TLSParameterError(
- 'If using TLS, the base_url argument must be provided.'
- )
-
- self.base_url = base_url
- self.timeout = timeout
- self.headers['User-Agent'] = user_agent
-
- self._auth_configs = auth.load_config()
-
- base_url = utils.parse_host(
- base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
- )
- if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(
- base_url, timeout, num_pools=num_pools
- )
- self.mount('http+docker://', self._custom_adapter)
- self._unmount('http://', 'https://')
- self.base_url = 'http+docker://localunixsocket'
- elif base_url.startswith('npipe://'):
- if not constants.IS_WINDOWS_PLATFORM:
- raise errors.DockerException(
- 'The npipe:// protocol is only supported on Windows'
- )
- try:
- self._custom_adapter = NpipeAdapter(
- base_url, timeout, num_pools=num_pools
- )
- except NameError:
- raise errors.DockerException(
- 'Install pypiwin32 package to enable npipe:// support'
- )
- self.mount('http+docker://', self._custom_adapter)
- self.base_url = 'http+docker://localnpipe'
- else:
- # Use SSLAdapter for the ability to specify SSL version
- if isinstance(tls, TLSConfig):
- tls.configure_client(self)
- elif tls:
- self._custom_adapter = ssladapter.SSLAdapter(
- pool_connections=num_pools
- )
- self.mount('https://', self._custom_adapter)
- self.base_url = base_url
-
- # version detection needs to be after unix adapter mounting
- if version is None:
- self._version = constants.DEFAULT_DOCKER_API_VERSION
- elif isinstance(version, six.string_types):
- if version.lower() == 'auto':
- self._version = self._retrieve_server_version()
- else:
- self._version = version
- else:
- raise errors.DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
- type(version).__name__
- )
- )
+from .api.client import APIClient
+from .constants import DEFAULT_TIMEOUT_SECONDS
+from .models.configs import ConfigCollection
+from .models.containers import ContainerCollection
+from .models.images import ImageCollection
+from .models.networks import NetworkCollection
+from .models.nodes import NodeCollection
+from .models.plugins import PluginCollection
+from .models.secrets import SecretCollection
+from .models.services import ServiceCollection
+from .models.swarm import Swarm
+from .models.volumes import VolumeCollection
+from .utils import kwargs_from_env
+
+
+class DockerClient(object):
+ """
+ A client for communicating with a Docker server.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.30``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ """
+ def __init__(self, *args, **kwargs):
+ self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
+ """
+ Return a client configured from environment variables.
+
+ The environment variables used are the same as those used by the
+ Docker command-line client. They are:
+
+ .. envvar:: DOCKER_HOST
+
+ The URL to the Docker host.
+
+ .. envvar:: DOCKER_TLS_VERIFY
+
+ Verify the host against a CA certificate.
+
+ .. envvar:: DOCKER_CERT_PATH
+
+ A path to a directory containing TLS certificates to use when
+ connecting to the Docker host.
+
+ Args:
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.30``
+ timeout (int): Default timeout for API calls, in seconds.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+ environment (dict): The environment to read environment variables
+ from. Default: the value of ``os.environ``
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.from_env()
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
+ timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
version = kwargs.pop('version', None)
- return cls(version=version, **kwargs_from_env(**kwargs))
-
- def _retrieve_server_version(self):
- try:
- return self.version(api_version=False)["ApiVersion"]
- except KeyError:
- raise errors.DockerException(
- 'Invalid response from docker daemon: key "ApiVersion"'
- ' is missing.'
- )
- except Exception as e:
- raise errors.DockerException(
- 'Error while fetching server API version: {0}'.format(e)
- )
-
- def _set_request_timeout(self, kwargs):
- """Prepare the kwargs for an HTTP request by inserting the timeout
- parameter, if not already present."""
- kwargs.setdefault('timeout', self.timeout)
- return kwargs
-
- @update_headers
- def _post(self, url, **kwargs):
- return self.post(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _get(self, url, **kwargs):
- return self.get(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _put(self, url, **kwargs):
- return self.put(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _delete(self, url, **kwargs):
- return self.delete(url, **self._set_request_timeout(kwargs))
-
- def _url(self, pathfmt, *args, **kwargs):
- for arg in args:
- if not isinstance(arg, six.string_types):
- raise ValueError(
- 'Expected a string but found {0} ({1}) '
- 'instead'.format(arg, type(arg))
- )
-
- quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
- args = map(quote_f, args)
-
- if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
- self.base_url, self._version, pathfmt.format(*args)
- )
- else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
-
- def _raise_for_status(self, response, explanation=None):
- """Raises stored :class:`APIError`, if one occurred."""
- try:
- response.raise_for_status()
- except requests.exceptions.HTTPError as e:
- if e.response.status_code == 404:
- raise errors.NotFound(e, response, explanation=explanation)
- raise errors.APIError(e, response, explanation=explanation)
-
- def _result(self, response, json=False, binary=False):
- assert not (json and binary)
- self._raise_for_status(response)
-
- if json:
- return response.json()
- if binary:
- return response.content
- return response.text
-
- def _post_json(self, url, data, **kwargs):
- # Go <1.1 can't unserialize null to a string
- # so we do this disgusting thing here.
- data2 = {}
- if data is not None:
- for k, v in six.iteritems(data):
- if v is not None:
- data2[k] = v
-
- if 'headers' not in kwargs:
- kwargs['headers'] = {}
- kwargs['headers']['Content-Type'] = 'application/json'
- return self._post(url, data=json.dumps(data2), **kwargs)
-
- def _attach_params(self, override=None):
- return override or {
- 'stdout': 1,
- 'stderr': 1,
- 'stream': 1
- }
-
- @check_resource
- def _attach_websocket(self, container, params=None):
- url = self._url("/containers/{0}/attach/ws", container)
- req = requests.Request("POST", url, params=self._attach_params(params))
- full_url = req.prepare().url
- full_url = full_url.replace("http://", "ws://", 1)
- full_url = full_url.replace("https://", "wss://", 1)
- return self._create_websocket_connection(full_url)
-
- def _create_websocket_connection(self, url):
- return websocket.create_connection(url)
-
- def _get_raw_response_socket(self, response):
- self._raise_for_status(response)
- if self.base_url == "http+docker://localnpipe":
- sock = response.raw._fp.fp.raw.sock
- elif six.PY3:
- sock = response.raw._fp.fp.raw
- if self.base_url.startswith("https://"):
- sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
- try:
- # Keep a reference to the response to stop it being garbage
- # collected. If the response is garbage collected, it will
- # close TLS sockets.
- sock._response = response
- except AttributeError:
- # UNIX sockets can't have attributes set on them, but that's
- # fine because we won't be doing TLS over them
- pass
-
- return sock
-
- def _stream_helper(self, response, decode=False):
- """Generator for data coming from a chunked-encoded HTTP response."""
- if response.raw._fp.chunked:
- reader = response.raw
- while not reader.closed:
- # this read call will block until we get a chunk
- data = reader.read(1)
- if not data:
- break
- if reader._fp.chunk_left:
- data += reader.read(reader._fp.chunk_left)
- if decode:
- if six.PY3:
- data = data.decode('utf-8')
- # remove the trailing newline
- data = data.strip()
- # split the data at any newlines
- data_list = data.split("\r\n")
- # load and yield each line separately
- for data in data_list:
- data = json.loads(data)
- yield data
- else:
- yield data
- else:
- # Response isn't chunked, meaning we probably
- # encountered an error immediately
- yield self._result(response, json=decode)
-
- def _multiplexed_buffer_helper(self, response):
- """A generator of multiplexed data blocks read from a buffered
- response."""
- buf = self._result(response, binary=True)
- walker = 0
- while True:
- if len(buf[walker:]) < 8:
- break
- _, length = struct.unpack_from('>BxxxL', buf[walker:])
- start = walker + constants.STREAM_HEADER_SIZE_BYTES
- end = start + length
- walker = end
- yield buf[start:end]
-
- def _multiplexed_response_stream_helper(self, response):
- """A generator of multiplexed data blocks coming from a response
- stream."""
-
- # Disable timeout on the underlying socket to prevent
- # Read timed out(s) for long running processes
- socket = self._get_raw_response_socket(response)
- self._disable_socket_timeout(socket)
-
- while True:
- header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
- if not header:
- break
- _, length = struct.unpack('>BxxxL', header)
- if not length:
- continue
- data = response.raw.read(length)
- if not data:
- break
- yield data
-
- def _stream_raw_result_old(self, response):
- ''' Stream raw output for API versions below 1.6 '''
- self._raise_for_status(response)
- for line in response.iter_lines(chunk_size=1,
- decode_unicode=True):
- # filter out keep-alive new lines
- if line:
- yield line
-
- def _stream_raw_result(self, response):
- ''' Stream result for TTY-enabled container above API 1.6 '''
- self._raise_for_status(response)
- for out in response.iter_content(chunk_size=1, decode_unicode=True):
- yield out
-
- def _read_from_socket(self, response, stream):
- socket = self._get_raw_response_socket(response)
-
- if stream:
- return frames_iter(socket)
- else:
- return six.binary_type().join(frames_iter(socket))
-
- def _disable_socket_timeout(self, socket):
- """ Depending on the combination of python version and whether we're
- connecting over http or https, we might need to access _sock, which
- may or may not exist; or we may need to just settimeout on socket
- itself, which also may or may not have settimeout on it. To avoid
- missing the correct one, we try both.
-
- We also do not want to set the timeout if it is already disabled, as
- you run the risk of changing a socket that was non-blocking to
- blocking, for example when using gevent.
+ return cls(timeout=timeout, version=version,
+ **kwargs_from_env(**kwargs))
+
+ # Resources
+ @property
+ def configs(self):
+ """
+ An object for managing configs on the server. See the
+ :doc:`configs documentation <configs>` for full details.
"""
- sockets = [socket, getattr(socket, '_sock', None)]
-
- for s in sockets:
- if not hasattr(s, 'settimeout'):
- continue
-
- timeout = -1
-
- if hasattr(s, 'gettimeout'):
- timeout = s.gettimeout()
-
- # Don't change the timeout if it is already disabled.
- if timeout is None or timeout == 0.0:
- continue
-
- s.settimeout(None)
-
- def _get_result(self, container, stream, res):
- cont = self.inspect_container(container)
- return self._get_result_tty(stream, res, cont['Config']['Tty'])
-
- def _get_result_tty(self, stream, res, is_tty):
- # Stream multi-plexing was only introduced in API v1.6. Anything
- # before that needs old-style streaming.
- if utils.compare_version('1.6', self._version) < 0:
- return self._stream_raw_result_old(res)
-
- # We should also use raw streaming (without keep-alives)
- # if we're dealing with a tty-enabled container.
- if is_tty:
- return self._stream_raw_result(res) if stream else \
- self._result(res, binary=True)
-
- self._raise_for_status(res)
- sep = six.binary_type()
- if stream:
- return self._multiplexed_response_stream_helper(res)
- else:
- return sep.join(
- [x for x in self._multiplexed_buffer_helper(res)]
- )
-
- def _unmount(self, *args):
- for proto in args:
- self.adapters.pop(proto)
-
- def get_adapter(self, url):
- try:
- return super(Client, self).get_adapter(url)
- except requests.exceptions.InvalidSchema as e:
- if self._custom_adapter:
- return self._custom_adapter
- else:
- raise e
+ return ConfigCollection(client=self)
@property
- def api_version(self):
- return self._version
+ def containers(self):
+ """
+ An object for managing containers on the server. See the
+ :doc:`containers documentation <containers>` for full details.
+ """
+ return ContainerCollection(client=self)
+ @property
+ def images(self):
+ """
+ An object for managing images on the server. See the
+ :doc:`images documentation <images>` for full details.
+ """
+ return ImageCollection(client=self)
-class AutoVersionClient(Client):
- def __init__(self, *args, **kwargs):
- if 'version' in kwargs and kwargs['version']:
- raise errors.DockerException(
- 'Can not specify version for AutoVersionClient'
- )
- kwargs['version'] = 'auto'
- super(AutoVersionClient, self).__init__(*args, **kwargs)
+ @property
+ def networks(self):
+ """
+ An object for managing networks on the server. See the
+ :doc:`networks documentation <networks>` for full details.
+ """
+ return NetworkCollection(client=self)
+
+ @property
+ def nodes(self):
+ """
+ An object for managing nodes on the server. See the
+ :doc:`nodes documentation <nodes>` for full details.
+ """
+ return NodeCollection(client=self)
+
+ @property
+ def plugins(self):
+ """
+ An object for managing plugins on the server. See the
+ :doc:`plugins documentation <plugins>` for full details.
+ """
+ return PluginCollection(client=self)
+
+ @property
+ def secrets(self):
+ """
+ An object for managing secrets on the server. See the
+ :doc:`secrets documentation <secrets>` for full details.
+ """
+ return SecretCollection(client=self)
+
+ @property
+ def services(self):
+ """
+ An object for managing services on the server. See the
+ :doc:`services documentation <services>` for full details.
+ """
+ return ServiceCollection(client=self)
+
+ @property
+ def swarm(self):
+ """
+ An object for managing a swarm on the server. See the
+ :doc:`swarm documentation <swarm>` for full details.
+ """
+ return Swarm(client=self)
+
+ @property
+ def volumes(self):
+ """
+ An object for managing volumes on the server. See the
+ :doc:`volumes documentation <volumes>` for full details.
+ """
+ return VolumeCollection(client=self)
+
+ # Top-level methods
+ def events(self, *args, **kwargs):
+ return self.api.events(*args, **kwargs)
+ events.__doc__ = APIClient.events.__doc__
+
+ def df(self):
+ return self.api.df()
+ df.__doc__ = APIClient.df.__doc__
+
+ def info(self, *args, **kwargs):
+ return self.api.info(*args, **kwargs)
+ info.__doc__ = APIClient.info.__doc__
+
+ def login(self, *args, **kwargs):
+ return self.api.login(*args, **kwargs)
+ login.__doc__ = APIClient.login.__doc__
+
+ def ping(self, *args, **kwargs):
+ return self.api.ping(*args, **kwargs)
+ ping.__doc__ = APIClient.ping.__doc__
+
+ def version(self, *args, **kwargs):
+ return self.api.version(*args, **kwargs)
+ version.__doc__ = APIClient.version.__doc__
+
+ def close(self):
+ return self.api.close()
+ close.__doc__ = APIClient.close.__doc__
+
+ def __getattr__(self, name):
+ s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ # If a user calls a method that has moved to APIClient, point them to it
+ if hasattr(APIClient, name):
+ s.append("In Docker SDK for Python 2.0, this method is now on the "
+ "object APIClient. See the low-level API section of the "
+ "documentation for more details.")
+ raise AttributeError(' '.join(s))
+
+
+from_env = DockerClient.from_env
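# Example (editor's sketch, not part of the upstream diff): the high-level
# DockerClient delegates everything to a wrapped APIClient, which stays
# reachable via the .api attribute. Assumes a running local daemon.
import docker

client = docker.from_env()
print(client.version()['ApiVersion'])  # proxied through client.api.version()
print(client.api.base_url)             # the underlying low-level client
client.close()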
diff --git a/docker/constants.py b/docker/constants.py
index 0c9a020..7565a76 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,8 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.24'
+DEFAULT_DOCKER_API_VERSION = '1.35'
+MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
@@ -14,5 +15,6 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
-DEFAULT_USER_AGENT = "docker-py/{0}".format(version)
+DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
diff --git a/docker/errors.py b/docker/errors.py
index 97be802..0253695 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -1,18 +1,47 @@
import requests
-class APIError(requests.exceptions.HTTPError):
- def __init__(self, message, response, explanation=None):
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = (response.content or '').strip()
+ cls = APIError
+ if response.status_code == 404:
+ if explanation and ('No such image' in str(explanation) or
+ 'not found: does not exist or no pull access'
+ in str(explanation) or
+ 'repository does not exist' in str(explanation)):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise cls(e, response=response, explanation=explanation)
+
+
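# Example (editor's sketch, not part of the upstream diff): a 404 whose
# message mentions a missing image is upgraded to ImageNotFound. The
# response object is faked here purely for illustration.
import json
import requests
from docker import errors

resp = requests.models.Response()
resp.status_code = 404
resp._content = json.dumps({'message': 'No such image: busybox:latest'}).encode()
try:
    errors.create_api_error_from_http_exception(
        requests.exceptions.HTTPError(response=resp))
except errors.ImageNotFound as e:
    print(e.status_code)  # 404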
+class APIError(requests.exceptions.HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
-
self.explanation = explanation
- if self.explanation is None and response.content:
- self.explanation = response.content.strip()
-
def __str__(self):
message = super(APIError, self).__str__()
@@ -29,18 +58,27 @@ class APIError(requests.exceptions.HTTPError):
return message
+ @property
+ def status_code(self):
+ if self.response is not None:
+ return self.response.status_code
+
def is_client_error(self):
- return 400 <= self.response.status_code < 500
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
def is_server_error(self):
- return 500 <= self.response.status_code < 600
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
-class DockerException(Exception):
+class NotFound(APIError):
pass
-class NotFound(APIError):
+class ImageNotFound(NotFound):
pass
@@ -56,6 +94,10 @@ class InvalidConfigFile(DockerException):
pass
+class InvalidArgument(DockerException):
+ pass
+
+
class DeprecatedMethod(DockerException):
pass
@@ -73,3 +115,48 @@ class TLSParameterError(DockerException):
class NullResource(DockerException, ValueError):
pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+
+ err = ": {}".format(stderr) if stderr is not None else ""
+ msg = ("Command '{}' in image '{}' returned non-zero exit "
+ "status {}{}").format(command, image, exit_status, err)
+
+ super(ContainerError, self).__init__(msg)
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(DockerException):
+ def __init__(self, reason, build_log):
+ super(BuildError, self).__init__(reason)
+ self.msg = reason
+ self.build_log = build_log
+
+
+class ImageLoadError(DockerException):
+ pass
+
+
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
+ text = ["{}() ".format(name)]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
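# Example (editor's sketch, not part of the upstream diff): the helper
# mirrors Python's own TypeError wording for stray keyword arguments.
from docker.errors import create_unexpected_kwargs_error

err = create_unexpected_kwargs_error('run', {'bogus': 1, 'extra': 2})
print(err)  # run() got unexpected keyword arguments 'bogus', 'extra'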
diff --git a/docker/models/__init__.py b/docker/models/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/docker/models/__init__.py
diff --git a/docker/models/configs.py b/docker/models/configs.py
new file mode 100644
index 0000000..7f23f65
--- /dev/null
+++ b/docker/models/configs.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Config(Model):
+ """A config."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this config.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the config could not be removed.
+ """
+ return self.client.api.remove_config(self.id)
+
+
+class ConfigCollection(Collection):
+ """Configs on the Docker server."""
+ model = Config
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_config(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_config.__doc__
+
+ def get(self, config_id):
+ """
+ Get a config.
+
+ Args:
+ config_id (str): Config ID.
+
+ Returns:
+ (:py:class:`Config`): The config.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the config does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_config(config_id))
+
+ def list(self, **kwargs):
+ """
+ List configs. Similar to the ``docker config ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Config`): The configs.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.configs(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
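# Example (editor's sketch, not part of the upstream diff): a round trip
# with the new configs collection. Requires a daemon in swarm mode and
# API >= 1.30; the name and payload are illustrative.
import docker

client = docker.from_env()
cfg = client.configs.create(name='app-config', data=b'key=value')
print(client.configs.get(cfg.id).name)  # app-config
cfg.remove()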
diff --git a/docker/models/containers.py b/docker/models/containers.py
new file mode 100644
index 0000000..1e06ed6
--- /dev/null
+++ b/docker/models/containers.py
@@ -0,0 +1,1056 @@
+import copy
+import ntpath
+from collections import namedtuple
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import (
+ ContainerError, DockerException, ImageNotFound,
+ create_unexpected_kwargs_error
+)
+from ..types import HostConfig
+from ..utils import version_gte
+from .images import Image
+from .resource import Collection, Model
+
+
+class Container(Model):
+
+ @property
+ def name(self):
+ """
+ The name of the container.
+ """
+ if self.attrs.get('Name') is not None:
+ return self.attrs['Name'].lstrip('/')
+
+ @property
+ def image(self):
+ """
+ The image of the container.
+ """
+ image_id = self.attrs.get('ImageID', self.attrs['Image'])
+ if image_id is None:
+ return None
+ return self.client.images.get(image_id.split(':')[1])
+
+ @property
+ def labels(self):
+ """
+ The labels of a container as dictionary.
+ """
+ try:
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+ except KeyError:
+ raise DockerException(
+ 'Label data is not available for sparse objects. Call reload()'
+ ' to retrieve all information'
+ )
+
+ @property
+ def status(self):
+ """
+ The status of the container. For example, ``running``, or ``exited``.
+ """
+ if isinstance(self.attrs['State'], dict):
+ return self.attrs['State']['Status']
+ return self.attrs['State']
+
+ def attach(self, **kwargs):
+ """
+ Attach to this container.
+
+ :py:meth:`logs` is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach(self.id, **kwargs)
+
+ def attach_socket(self, **kwargs):
+ """
+ Like :py:meth:`attach`, but returns the underlying socket-like object
+ for the HTTP request.
+
+ Args:
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach_socket(self.id, **kwargs)
+
+ def commit(self, repository=None, tag=None, **kwargs):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ resp = self.client.api.commit(self.id, repository=repository, tag=tag,
+ **kwargs)
+ return self.client.images.get(resp['Id'])
+
+ def diff(self):
+ """
+ Inspect changes on a container's filesystem.
+
+ Returns:
+ (str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.diff(self.id)
+
+ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
+ privileged=False, user='', detach=False, stream=False,
+ socket=False, environment=None, workdir=None):
+ """
+ Run a command inside this container. Similar to
+ ``docker exec``.
+
+ Args:
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ stream (bool): Stream response data. Default: False
+ socket (bool): Return the connection socket to allow custom
+ read/write operations. Default: False
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+
+ Returns:
+ (ExecResult): A tuple of (exit_code, output)
+ exit_code: (int):
+ Exit code for the executed command or ``None`` if
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator or str):
+ If ``stream=True``, a generator yielding response chunks.
+ If ``socket=True``, a socket object for the connection.
+ A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.exec_create(
+ self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
+ privileged=privileged, user=user, environment=environment,
+ workdir=workdir
+ )
+ exec_output = self.client.api.exec_start(
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ )
+ if socket or stream:
+ return ExecResult(None, exec_output)
+
+ return ExecResult(
+ self.client.api.exec_inspect(resp['Id'])['ExitCode'],
+ exec_output
+ )
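# Example (editor's sketch, not part of the upstream diff): ExecResult is a
# named tuple, so exit code and output unpack cleanly. Assumes a running
# container named 'web' (illustrative).
import docker

client = docker.from_env()
exit_code, output = client.containers.get('web').exec_run('uname -a')
print(exit_code, output.decode().strip())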
+
+ def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Export the contents of the container's filesystem as a tar archive.
+
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (str): The filesystem tar archive
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.export(self.id, chunk_size)
+
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Retrieve a file or folder from the container in the form of a tar
+ archive.
+
+ Args:
+ path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.get_archive(self.id, path, chunk_size)
+
+ def kill(self, signal=None):
+ """
+ Kill or send a signal to the container.
+
+ Args:
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self.client.api.kill(self.id, signal=signal)
+
+ def logs(self, **kwargs):
+ """
+ Get logs from this container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output specified number of lines at the end of
+ logs. Either an integer number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+ until (datetime or int): Show logs that occurred before the given
+ datetime or integer epoch (in seconds)
+
+ Returns:
+ (generator or str): Logs from the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.logs(self.id, **kwargs)
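# Example (editor's sketch, not part of the upstream diff): with stream=True
# the call returns a blocking generator of log chunks. Assumes a running
# container named 'web' (illustrative).
import docker

client = docker.from_env()
for chunk in client.containers.get('web').logs(stream=True, follow=True):
    print(chunk.decode(), end='')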
+
+ def pause(self):
+ """
+ Pauses all processes within this container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.pause(self.id)
+
+ def put_archive(self, path, data):
+ """
+ Insert a file or folder in this container using a tar archive as
+ source.
+
+ Args:
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`~docker.errors.APIError` If an error occurs.
+ """
+ return self.client.api.put_archive(self.id, path, data)
+
+ def remove(self, **kwargs):
+ """
+ Remove this container. Similar to the ``docker rm`` command.
+
+ Args:
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_container(self.id, **kwargs)
+
+ def rename(self, name):
+ """
+ Rename this container. Similar to the ``docker rename`` command.
+
+ Args:
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.rename(self.id, name)
+
+ def resize(self, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.resize(self.id, height, width)
+
+ def restart(self, **kwargs):
+ """
+ Restart this container. Similar to the ``docker restart`` command.
+
+ Args:
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.restart(self.id, **kwargs)
+
+ def start(self, **kwargs):
+ """
+ Start this container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.start(self.id, **kwargs)
+
+ def stats(self, **kwargs):
+ """
+ Stream statistics for this container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stats(self.id, **kwargs)
+
+ def stop(self, **kwargs):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stop(self.id, **kwargs)
+
+ def top(self, **kwargs):
+ """
+ Display the running processes of the container.
+
+ Args:
+ ps_args (str): Optional arguments to pass to ``ps`` (e.g. ``aux``)
+
+ Returns:
+ (str): The output of the ``top`` command
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.top(self.id, **kwargs)
+
+ def unpause(self):
+ """
+ Unpause all processes within the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.unpause(self.id)
+
+ def update(self, **kwargs):
+ """
+ Update resource configuration of this container.
+
+ Args:
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.update_container(self.id, **kwargs)
+
+ def wait(self, **kwargs):
+ """
+ Block until the container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
+
+ Returns:
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.wait(self.id, **kwargs)
+
+
+class ContainerCollection(Collection):
+ model = Container
+
+ def run(self, image, command=None, stdout=True, stderr=False,
+ remove=False, **kwargs):
+ """
+ Run a container. By default, it will wait for the container to finish
+ and return its logs, similar to ``docker run``.
+
+ If the ``detach`` argument is ``True``, it will start the container
+ and immediately return a :py:class:`Container` object, similar to
+ ``docker run -d``.
+
+ Example:
+ Run a container and get its output:
+
+ >>> import docker
+ >>> client = docker.from_env()
+ >>> client.containers.run('alpine', 'echo hello world')
+ b'hello world\\n'
+
+ Run a container and detach:
+
+ >>> container = client.containers.run('bfirsh/reticulate-splines',
+ detach=True)
+ >>> container.logs()
+ 'Reticulating spline 1...\\nReticulating spline 2...\\n'
+
+ Args:
+ image (str): The image to run.
+ command (str or list): The command to run in the container.
+ auto_remove (bool): Enable auto-removal of the container on the
+ daemon side when the container's process exits.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_count (int): Number of usable CPUs (Windows only).
+ cpu_percent (int): Usable percentage of the available CPUs
+ (Windows only).
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ detach (bool): Run container in the background and return a
+ :py:class:`Container` object.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file.
+ dns_search (:py:class:`list`): DNS search domains.
+ domainname (str or list): Set custom DNS search domains.
+ entrypoint (str or list): The entrypoint for the container.
+ environment (dict or list): Environment variables to set inside
+ the container, as a dictionary or a list of strings in the
+ format ``["SOMEVARIABLE=xxx"]``.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+ hostname (str): Optional hostname for the container.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ mac_address (str): MAC address to assign to the container.
+ mem_limit (int or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``volumes``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
+ name (str): The name for this container.
+ nano_cpus (int): CPU quota in units of 1e-9 CPUs.
+ network (str): Name of the network this container will be connected
+ to at creation time. You can connect to additional networks
+ using :py:meth:`Network.connect`. Incompatible with
+ ``network_mode``.
+ network_disabled (bool): Disable networking.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container
+ on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+
+ Incompatible with ``network``.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ Only used if the method needs to pull the requested image.
+ ports (dict): Ports to bind inside the container.
+
+ The keys of the dictionary are the ports to bind inside the
+ container, either as an integer or a string in the form
+ ``port/protocol``, where the protocol is either ``tcp`` or
+ ``udp``.
+
+ The values of the dictionary are the corresponding ports to
+ open on the host, which can be either:
+
+ - The port number, as an integer. For example,
+ ``{'2222/tcp': 3333}`` will expose port 2222 inside the
+ container as port 3333 on the host.
+ - ``None``, to assign a random host port. For example,
+ ``{'2222/tcp': None}``.
+ - A tuple of ``(address, port)`` if you want to specify the
+ host interface. For example,
+ ``{'1111/tcp': ('127.0.0.1', 1111)}``.
+ - A list of integers, if you want to bind multiple host ports
+ to a single container port. For example,
+ ``{'1111/tcp': [1234, 4567]}``.
+
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ remove (bool): Remove the container when it has finished running.
+ Default: ``False``.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure`` or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+
+ For example:
+ ``{"Name": "on-failure", "MaximumRetryCount": 5}``
+
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ stdin_open (bool): Keep ``STDIN`` open even if not attached.
+ stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
+ Default: ``True``.
+ stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
+ Default: ``False``.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ stream (bool): If true and ``detach`` is false, return a log
+ generator instead of a string. Ignored if ``detach`` is true.
+ Default: ``False``.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ tty (bool): Allocate a pseudo-TTY.
+ ulimits (:py:class:`list`): Ulimits to set inside the container, as
+ a list of dicts.
+ user (str or int): Username or UID to run commands as inside the
+ container.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volume_driver (str): The name of a volume driver/plugin.
+ volumes (dict or list): A dictionary to configure volumes mounted
+ inside the container. The key is either the host path or a
+ volume name, and the value is a dictionary with the keys:
+
+ - ``bind`` The path to mount the volume inside the container
+ - ``mode`` Either ``rw`` to mount the volume read/write, or
+ ``ro`` to mount it read-only.
+
+ For example:
+
+ .. code-block:: python
+
+ {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
+ '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ working_dir (str): Path to the working directory.
+ runtime (str): Runtime to use with this container.
+
+ Returns:
+ The container logs, either ``STDOUT``, ``STDERR``, or both,
+ depending on the value of the ``stdout`` and ``stderr`` arguments.
+
+ ``STDOUT`` and ``STDERR`` may only be read if the ``json-file``
+ or ``journald`` logging driver is used; with any other driver,
+ ``None`` is returned instead. See the
+ `Engine API documentation
+ <https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
+ for full details.
+
+ If ``detach`` is ``True``, a :py:class:`Container` object is
+ returned instead.
+
+ Raises:
+ :py:class:`docker.errors.ContainerError`
+ If the container exits with a non-zero exit code and
+ ``detach`` is ``False``.
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ stream = kwargs.pop('stream', False)
+ detach = kwargs.pop('detach', False)
+ platform = kwargs.pop('platform', None)
+
+ if detach and remove:
+ if version_gte(self.client.api._version, '1.25'):
+ kwargs["auto_remove"] = True
+ else:
+ raise RuntimeError("The options 'detach' and 'remove' cannot "
+ "be used together in api versions < 1.25.")
+
+ if kwargs.get('network') and kwargs.get('network_mode'):
+ raise RuntimeError(
+ 'The options "network" and "network_mode" can not be used '
+ 'together.'
+ )
+
+ try:
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+ except ImageNotFound:
+ self.client.images.pull(image, platform=platform)
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+
+ container.start()
+
+ if detach:
+ return container
+
+ logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
+
+ out = None
+ if logging_driver == 'json-file' or logging_driver == 'journald':
+ out = container.logs(
+ stdout=stdout, stderr=stderr, stream=True, follow=True
+ )
+
+ exit_status = container.wait()['StatusCode']
+ if exit_status != 0:
+ out = None
+ if not kwargs.get('auto_remove'):
+ out = container.logs(stdout=False, stderr=True)
+
+ if remove:
+ container.remove()
+ if exit_status != 0:
+ raise ContainerError(
+ container, exit_status, command, image, out
+ )
+
+ return out if stream or out is None else b''.join(
+ [line for line in out]
+ )
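# Example (editor's sketch, not part of the upstream diff): the
# detach/remove interplay implemented above. On API >= 1.25, remove=True
# combined with detach=True is translated into daemon-side auto_remove.
# Assumes a running daemon; images are pulled on demand.
import docker

client = docker.from_env()
print(client.containers.run('alpine', 'echo hi', remove=True))  # b'hi\n'
container = client.containers.run('alpine', 'sleep 60',
                                  detach=True, remove=True)
print(container.status)  # cleaned up by the daemon once it exits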
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a container without starting it. Similar to ``docker create``.
+
+ Takes the same arguments as :py:meth:`run`, except for ``stdout``,
+ ``stderr``, and ``remove``.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ kwargs['image'] = image
+ kwargs['command'] = command
+ kwargs['version'] = self.client.api._version
+ create_kwargs = _create_container_args(kwargs)
+ resp = self.client.api.create_container(**create_kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, container_id):
+ """
+ Get a container by name or ID.
+
+ Args:
+ container_id (str): Container name or ID.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the container does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.inspect_container(container_id)
+ return self.prepare_model(resp)
+
+ def list(self, all=False, before=None, filters=None, limit=-1, since=None,
+ sparse=False):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+ before (str): Show only containers created before Id or Name,
+ include non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ sparse (bool): Do not inspect containers. Returns partial
+ information, but guaranteed not to block. Use
+ :py:meth:`Container.reload` on resulting objects to retrieve
+ all attributes. Default: ``False``
+
+ Returns:
+ (list of :py:class:`Container`)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.containers(all=all, before=before,
+ filters=filters, limit=limit,
+ since=since)
+ if sparse:
+ return [self.prepare_model(r) for r in resp]
+ else:
+ return [self.get(r['Id']) for r in resp]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_containers(filters=filters)
+ prune.__doc__ = APIClient.prune_containers.__doc__
+
+
+# kwargs to copy straight from run to create
+RUN_CREATE_KWARGS = [
+ 'command',
+ 'detach',
+ 'domainname',
+ 'entrypoint',
+ 'environment',
+ 'healthcheck',
+ 'hostname',
+ 'image',
+ 'labels',
+ 'mac_address',
+ 'name',
+ 'network_disabled',
+ 'stdin_open',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'volume_driver',
+ 'working_dir',
+]
+
+# kwargs to copy straight from run to host_config
+RUN_HOST_CONFIG_KWARGS = [
+ 'auto_remove',
+ 'blkio_weight_device',
+ 'blkio_weight',
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'cpu_count',
+ 'cpu_percent',
+ 'cpu_period',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpuset_cpus',
+ 'cpuset_mems',
+ 'cpu_rt_period',
+ 'cpu_rt_runtime',
+ 'device_cgroup_rules',
+ 'device_read_bps',
+ 'device_read_iops',
+ 'device_write_bps',
+ 'device_write_iops',
+ 'devices',
+ 'dns_opt',
+ 'dns_search',
+ 'dns',
+ 'extra_hosts',
+ 'group_add',
+ 'init',
+ 'init_path',
+ 'ipc_mode',
+ 'isolation',
+ 'kernel_memory',
+ 'links',
+ 'log_config',
+ 'lxc_conf',
+ 'mem_limit',
+ 'mem_reservation',
+ 'mem_swappiness',
+ 'memswap_limit',
+ 'mounts',
+ 'nano_cpus',
+ 'network_mode',
+ 'oom_kill_disable',
+ 'oom_score_adj',
+ 'pid_mode',
+ 'pids_limit',
+ 'privileged',
+ 'publish_all_ports',
+ 'read_only',
+ 'restart_policy',
+ 'security_opt',
+ 'shm_size',
+ 'storage_opt',
+ 'sysctls',
+ 'tmpfs',
+ 'ulimits',
+ 'userns_mode',
+ 'version',
+ 'volumes_from',
+ 'runtime'
+]
+
+
+def _create_container_args(kwargs):
+ """
+ Convert arguments to create() to arguments to create_container().
+ """
+ # Copy over kwargs which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_CREATE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ host_config_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_HOST_CONFIG_KWARGS:
+ host_config_kwargs[key] = kwargs.pop(key)
+
+ # Process kwargs which are split over both create and host_config
+ ports = kwargs.pop('ports', {})
+ if ports:
+ host_config_kwargs['port_bindings'] = ports
+
+ volumes = kwargs.pop('volumes', {})
+ if volumes:
+ host_config_kwargs['binds'] = volumes
+
+ network = kwargs.pop('network', None)
+ if network:
+ create_kwargs['networking_config'] = {network: None}
+ host_config_kwargs['network_mode'] = network
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error('run', kwargs)
+
+ create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
+
+ # Fill in any kwargs which need processing by create_host_config first
+ port_bindings = create_kwargs['host_config'].get('PortBindings')
+ if port_bindings:
+ # sort to make consistent for tests
+ create_kwargs['ports'] = [tuple(p.split('/', 1))
+ for p in sorted(port_bindings.keys())]
+ if volumes:
+ if isinstance(volumes, dict):
+ create_kwargs['volumes'] = [
+ v.get('bind') for v in volumes.values()
+ ]
+ else:
+ create_kwargs['volumes'] = [
+ _host_volume_from_bind(v) for v in volumes
+ ]
+ return create_kwargs
+
+
+def _host_volume_from_bind(bind):
+ drive, rest = ntpath.splitdrive(bind)
+ bits = rest.split(':', 1)
+ if len(bits) == 1 or bits[1] in ('ro', 'rw'):
+ return drive + bits[0]
+ elif bits[1].endswith(':ro') or bits[1].endswith(':rw'):
+ # Strip the access-mode suffix; str.rstrip(':ro') would instead
+ # strip any trailing ':', 'r', 'o' characters and mangle paths.
+ return bits[1][:-3]
+ else:
+ return bits[1]
+
+
+ExecResult = namedtuple('ExecResult', 'exit_code,output')
+""" A result of Container.exec_run with the properties ``exit_code`` and
+ ``output``. """
diff --git a/docker/models/images.py b/docker/models/images.py
new file mode 100644
index 0000000..d4893bb
--- /dev/null
+++ b/docker/models/images.py
@@ -0,0 +1,443 @@
+import itertools
+import re
+
+import six
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import BuildError, ImageLoadError, InvalidArgument
+from ..utils import parse_repository_tag
+from ..utils.json_stream import json_stream
+from .resource import Collection, Model
+
+
+class Image(Model):
+ """
+ An image on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+
+ @property
+ def labels(self):
+ """
+ The labels of an image as dictionary.
+ """
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ if self.id.startswith('sha256:'):
+ return self.id[:17]
+ return self.id[:10]
+
+ @property
+ def tags(self):
+ """
+ The image's tags.
+ """
+ tags = self.attrs.get('RepoTags')
+ if tags is None:
+ tags = []
+ return [tag for tag in tags if tag != '<none>:<none>']
+
+ def history(self):
+ """
+ Show the history of an image.
+
+ Returns:
+ (list): The history of the image.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.history(self.id)
+
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): A stream of raw archive data.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = client.images.get("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image.save():
+ >>> f.write(chunk)
+ >>> f.close()
+ """
+ return self.client.api.get_image(self.id, chunk_size)
+
+ def tag(self, repository, tag=None, **kwargs):
+ """
+ Tag this image into a repository. Similar to the ``docker tag``
+ command.
+
+ Args:
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ (bool): ``True`` if successful
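+
+ Example (illustrative repository name):
+
+ >>> image.tag('registry.example.com/myimage', tag='v1')
+ True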
+ """
+ return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+
+
+class RegistryData(Model):
+ """
+ Image metadata stored on the registry, including available platforms.
+ """
+ def __init__(self, image_name, *args, **kwargs):
+ super(RegistryData, self).__init__(*args, **kwargs)
+ self.image_name = image_name
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs['Descriptor']['digest']
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ return self.id[:17]
+
+ def pull(self, platform=None):
+ """
+ Pull the image digest.
+
+ Args:
+ platform (str): The platform to pull the image for.
+ Default: ``None``
+
+ Returns:
+ (:py:class:`Image`): A reference to the pulled image.
+ """
+ repository, _ = parse_repository_tag(self.image_name)
+ return self.collection.pull(repository, tag=self.id, platform=platform)
+
+ def has_platform(self, platform):
+ """
+ Check whether the given platform identifier is available for this
+ digest.
+
+ Args:
+ platform (str or dict): A string using the ``os[/arch[/variant]]``
+ format, or a platform dictionary.
+
+ Returns:
+ (bool): ``True`` if the platform is recognized as available,
+ ``False`` otherwise.
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the platform argument is not a valid descriptor.
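+
+ Example (illustrative):
+
+ >>> registry_data.has_platform('linux/amd64')
+ True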
+ """
+ if platform and not isinstance(platform, dict):
+ parts = platform.split('/')
+ if len(parts) > 3 or len(parts) < 1:
+ raise InvalidArgument(
+ '"{0}" is not a valid platform descriptor'.format(platform)
+ )
+ platform = {'os': parts[0]}
+ if len(parts) > 2:
+ platform['variant'] = parts[2]
+ if len(parts) > 1:
+ platform['architecture'] = parts[1]
+ return normalize_platform(
+ platform, self.client.version()
+ ) in self.attrs['Platforms']
+
+ def reload(self):
+ self.attrs = self.client.api.inspect_distribution(self.image_name)
+
+ reload.__doc__ = Model.reload.__doc__
+
+
+class ImageCollection(Collection):
+ model = Image
+
+ def build(self, **kwargs):
+ """
+ Build an image and return it. Similar to the ``docker build``
+ command. Either ``path`` or ``fileobj`` must be set.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is compressed
+ also, set ``encoding`` to the correct value (e.g ``gzip``).
+
+ If you want to get the raw output of the build, use the
+ :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
+ low-level API.
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of ``False`` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (list): A list of images used for build cache
+ resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+
+ Returns:
+ (tuple): The first item is the :py:class:`Image` object for the
+ image that was built. The second item is a generator of the
+ build logs as JSON-decoded objects.
+
+ Raises:
+ :py:class:`docker.errors.BuildError`
+ If there is an error during the build.
+ :py:class:`docker.errors.APIError`
+ If the server returns any other error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
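+
+ Example (a minimal sketch; assumes a Dockerfile in the current
+ directory and ``client = docker.from_env()``):
+
+ >>> image, logs = client.images.build(path='.', tag='myimage')
+ >>> for line in logs:
+ >>> print(line)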
+ """
+ resp = self.client.api.build(**kwargs)
+ if isinstance(resp, six.string_types):
+ return self.get(resp)
+ last_event = None
+ image_id = None
+ result_stream, internal_stream = itertools.tee(json_stream(resp))
+ for chunk in internal_stream:
+ if 'error' in chunk:
+ raise BuildError(chunk['error'], result_stream)
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Successfully built |sha256:)([0-9a-f]+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ last_event = chunk
+ if image_id:
+ return (self.get(image_id), result_stream)
+ raise BuildError(last_event or 'Unknown', result_stream)
+
+ def get(self, name):
+ """
+ Gets an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`Image`): The image.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_image(name))
+
+ def get_registry_data(self, name):
+ """
+ Gets the registry data for an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`RegistryData`): The data object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return RegistryData(
+ image_name=name,
+ attrs=self.client.api.inspect_distribution(name),
+ client=self.client,
+ collection=self,
+ )
+
+ def list(self, name=None, all=False, filters=None):
+ """
+ List images on the server.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.images(name=name, all=all, filters=filters)
+ return [self.get(r["Id"]) for r in resp]
+
+ def load(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
+ Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
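+
+ Example (illustrative; assumes a tarball produced by ``docker save``):
+
+ >>> f = open('/tmp/busybox-latest.tar', 'rb')
+ >>> images = client.images.load(f.read())
+ >>> f.close()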
+ """
+ resp = self.client.api.load_image(data)
+ images = []
+ for chunk in resp:
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Loaded image ID: |^Loaded image: )(.+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ images.append(image_id)
+ if 'error' in chunk:
+ raise ImageLoadError(chunk['error'])
+
+ return [self.get(i) for i in images]
+
+ def pull(self, repository, tag=None, **kwargs):
+ """
+ Pull an image of the given name and return it. Similar to the
+ ``docker pull`` command.
+ If no tag is specified, all tags from that repository will be
+ pulled.
+
+ If you want to get the raw pull output, use the
+ :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
+ low-level API.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.client.DockerClient.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+
+ Returns:
+ (:py:class:`Image` or list): The image that has been pulled.
+ If no ``tag`` was specified, the method will return a list
+ of :py:class:`Image` objects belonging to this repository.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> # Pull the image tagged `latest` in the busybox repo
+ >>> image = client.images.pull('busybox:latest')
+
+ >>> # Pull all tags in the busybox repo
+ >>> images = client.images.pull('busybox')
+ """
+ if not tag:
+ repository, tag = parse_repository_tag(repository)
+
+ self.client.api.pull(repository, tag=tag, **kwargs)
+ if tag:
+ return self.get('{0}{2}{1}'.format(
+ repository, tag, '@' if tag.startswith('sha256:') else ':'
+ ))
+ return self.list(repository)
+
+ def push(self, repository, tag=None, **kwargs):
+ return self.client.api.push(repository, tag=tag, **kwargs)
+ push.__doc__ = APIClient.push.__doc__
+
+ def remove(self, *args, **kwargs):
+ self.client.api.remove_image(*args, **kwargs)
+ remove.__doc__ = APIClient.remove_image.__doc__
+
+ def search(self, *args, **kwargs):
+ return self.client.api.search(*args, **kwargs)
+ search.__doc__ = APIClient.search.__doc__
+
+ def prune(self, filters=None):
+ return self.client.api.prune_images(filters=filters)
+ prune.__doc__ = APIClient.prune_images.__doc__
+
+
+def normalize_platform(platform, engine_info):
+ if platform is None:
+ platform = {}
+ if 'os' not in platform:
+ platform['os'] = engine_info['Os']
+ if 'architecture' not in platform:
+ platform['architecture'] = engine_info['Arch']
+ return platform
diff --git a/docker/models/networks.py b/docker/models/networks.py
new file mode 100644
index 0000000..1c2fbf2
--- /dev/null
+++ b/docker/models/networks.py
@@ -0,0 +1,215 @@
+from ..api import APIClient
+from ..utils import version_gte
+from .containers import Container
+from .resource import Model, Collection
+
+
+class Network(Model):
+ """
+ A Docker network.
+ """
+ @property
+ def name(self):
+ """
+ The name of the network.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def containers(self):
+ """
+ The containers that are connected to the network, as a list of
+ :py:class:`~docker.models.containers.Container` objects.
+ """
+ return [
+ self.client.containers.get(cid) for cid in
+ (self.attrs.get('Containers') or {}).keys()
+ ]
+
+ def connect(self, container, *args, **kwargs):
+ """
+ Connect a container to this network.
+
+ Args:
+ container (str): Container to connect to this network, as either
+ an ID, name, or :py:class:`~docker.models.containers.Container`
+ object.
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
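+
+ Example (illustrative container name):
+
+ >>> network.connect('my-container', aliases=['web'])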
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.connect_container_to_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def disconnect(self, container, *args, **kwargs):
+ """
+ Disconnect a container from this network.
+
+ Args:
+ container (str): Container to disconnect from this network, as
+ either an ID, name, or
+ :py:class:`~docker.models.containers.Container` object.
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.disconnect_container_from_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def remove(self):
+ """
+ Remove this network.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_network(self.id)
+
+
+class NetworkCollection(Collection):
+ """
+ Networks on the Docker server.
+ """
+ model = Network
+
+ def create(self, name, *args, **kwargs):
+ """
+ Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+ check_duplicate (bool): Request daemon to check for networks with
+ the same name. Default: ``None``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (:py:class:`Network`): The network that was created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.networks.create("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> client.networks.create(
+ "network1",
+ driver="bridge",
+ ipam=ipam_config
+ )
+
+ """
+ resp = self.client.api.create_network(name, *args, **kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, network_id, *args, **kwargs):
+ """
+ Get a network by its ID.
+
+ Args:
+ network_id (str): The ID of the network.
+ verbose (bool): Retrieve the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
+
+ Returns:
+ (:py:class:`Network`): The network.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the network does not exist.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ return self.prepare_model(
+ self.client.api.inspect_network(network_id, *args, **kwargs)
+ )
+
+ def list(self, *args, **kwargs):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by.
+ ids (:py:class:`list`): List of ids to filter by.
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+ greedy (bool): Fetch more details for each network individually.
+ You might want this to get the containers attached to them.
+
+ Returns:
+ (list of :py:class:`Network`): The networks on the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
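+
+ Example (illustrative):
+
+ >>> client.networks.list(filters={'driver': 'bridge'})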
+ """
+ greedy = kwargs.pop('greedy', False)
+ resp = self.client.api.networks(*args, **kwargs)
+ networks = [self.prepare_model(item) for item in resp]
+ if greedy and version_gte(self.client.api._version, '1.28'):
+ for net in networks:
+ net.reload()
+ return networks
+
+ def prune(self, filters=None):
+ return self.client.api.prune_networks(filters=filters)
+ prune.__doc__ = APIClient.prune_networks.__doc__
diff --git a/docker/models/nodes.py b/docker/models/nodes.py
new file mode 100644
index 0000000..8dd9350
--- /dev/null
+++ b/docker/models/nodes.py
@@ -0,0 +1,107 @@
+from .resource import Model, Collection
+
+
+class Node(Model):
+ """A node in a swarm."""
+ id_attribute = 'ID'
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def update(self, node_spec):
+ """
+ Update the node's configuration.
+
+ Args:
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> node.update(node_spec)
+
+ """
+ return self.client.api.update_node(self.id, self.version, node_spec)
+
+ def remove(self, force=False):
+ """
+ Remove this node from the swarm.
+
+ Args:
+ force (bool): Force remove an active node. Default: `False`
+
+ Returns:
+ `True` if the request was successful.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_node(self.id, force=force)
+
+
+class NodeCollection(Collection):
+ """Nodes on the Docker server."""
+ model = Node
+
+ def get(self, node_id):
+ """
+ Get a node.
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A :py:class:`Node` object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_node(node_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of :py:class:`Node` objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.nodes.list(filters={'role': 'manager'})
+ """
+ return [
+ self.prepare_model(n)
+ for n in self.client.api.nodes(*args, **kwargs)
+ ]
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
new file mode 100644
index 0000000..0688018
--- /dev/null
+++ b/docker/models/plugins.py
@@ -0,0 +1,200 @@
+from .. import errors
+from .resource import Collection, Model
+
+
+class Plugin(Model):
+ """
+ A plugin on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ """
+ The plugin's name.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def enabled(self):
+ """
+ Whether the plugin is enabled.
+ """
+ return self.attrs.get('Enabled')
+
+ @property
+ def settings(self):
+ """
+ A dictionary representing the plugin's configuration.
+ """
+ return self.attrs.get('Settings')
+
+ def configure(self, options):
+ """
+ Update the plugin's settings.
+
+ Args:
+ options (dict): A key-value mapping of options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.configure_plugin(self.name, options)
+ self.reload()
+
+ def disable(self):
+ """
+ Disable the plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ self.client.api.disable_plugin(self.name)
+ self.reload()
+
+ def enable(self, timeout=0):
+ """
+ Enable the plugin.
+
+ Args:
+ timeout (int): Timeout in seconds. Default: 0
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.enable_plugin(self.name, timeout)
+ self.reload()
+
+ def push(self):
+ """
+ Push the plugin to a remote registry.
+
+ Returns:
+ A dict iterator streaming the status of the upload.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.push_plugin(self.name)
+
+ def remove(self, force=False):
+ """
+ Remove the plugin from the server.
+
+ Args:
+ force (bool): Remove even if the plugin is enabled.
+ Default: False
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_plugin(self.name, force=force)
+
+ def upgrade(self, remote=None):
+ """
+ Upgrade the plugin.
+
+ Args:
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ Default: this plugin's name.
+
+ Returns:
+ A generator streaming the decoded API logs.
+ """
+ if self.enabled:
+ raise errors.DockerException(
+ 'Plugin must be disabled before upgrading.'
+ )
+
+ if remote is None:
+ remote = self.name
+ privileges = self.client.api.plugin_privileges(remote)
+ for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
+ yield d
+ self.reload()
+
+
+class PluginCollection(Collection):
+ model = Plugin
+
+ def create(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ (:py:class:`Plugin`): The newly created plugin.
+ """
+ self.client.api.create_plugin(name, plugin_data_dir, gzip)
+ return self.get(name)
+
+ def get(self, name):
+ """
+ Gets a plugin.
+
+ Args:
+ name (str): The name of the plugin.
+
+ Returns:
+ (:py:class:`Plugin`): The plugin.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the plugin does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_plugin(name))
+
+ def install(self, remote_name, local_name=None):
+ """
+ Pull and install a plugin.
+
+ Args:
+ remote_name (string): Remote reference for the plugin to
+ install. The ``:latest`` tag is optional, and is the
+ default if omitted.
+ local_name (string): Local name for the pulled plugin.
+ The ``:latest`` tag is optional, and is the default if
+ omitted. Optional.
+
+ Returns:
+ (:py:class:`Plugin`): The installed plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
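+
+ Example (a sketch using the public ``vieux/sshfs`` plugin):
+
+ >>> plugin = client.plugins.install('vieux/sshfs:latest')
+ >>> plugin.enable()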
+ """
+ privileges = self.client.api.plugin_privileges(remote_name)
+ it = self.client.api.pull_plugin(remote_name, privileges, local_name)
+ for data in it:
+ # consume the pull progress stream before inspecting the plugin
+ pass
+ return self.get(local_name or remote_name)
+
+ def list(self):
+ """
+ List plugins installed on the server.
+
+ Returns:
+ (list of :py:class:`Plugin`): The plugins.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.plugins()
+ return [self.prepare_model(r) for r in resp]
diff --git a/docker/models/resource.py b/docker/models/resource.py
new file mode 100644
index 0000000..ed3900a
--- /dev/null
+++ b/docker/models/resource.py
@@ -0,0 +1,93 @@
+
+class Model(object):
+ """
+ A base class for representing a single object on the server.
+ """
+ id_attribute = 'Id'
+
+ def __init__(self, attrs=None, client=None, collection=None):
+ #: A client pointing at the server that this object is on.
+ self.client = client
+
+ #: The collection that this model is part of.
+ self.collection = collection
+
+ #: The raw representation of this object from the API
+ self.attrs = attrs
+ if self.attrs is None:
+ self.attrs = {}
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.id == other.id
+
+ def __hash__(self):
+ return hash("%s:%s" % (self.__class__.__name__, self.id))
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs.get(self.id_attribute)
+
+ @property
+ def short_id(self):
+ """
+ The ID of the object, truncated to 10 characters.
+ """
+ return self.id[:10]
+
+ def reload(self):
+ """
+ Load this object from the server again and update ``attrs`` with the
+ new data.
+ """
+ new_model = self.collection.get(self.id)
+ self.attrs = new_model.attrs
+
+
+class Collection(object):
+ """
+ A base class for representing all objects of a particular type on the
+ server.
+ """
+
+ #: The type of object this collection represents, set by subclasses
+ model = None
+
+ def __init__(self, client=None):
+ #: The client pointing at the server that this collection of objects
+ #: is on.
+ self.client = client
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError(
+ "'{}' object is not callable. You might be trying to use the old "
+ "(pre-2.0) API - use docker.APIClient if so."
+ .format(self.__class__.__name__))
+
+ def list(self):
+ raise NotImplementedError
+
+ def get(self, key):
+ raise NotImplementedError
+
+ def create(self, attrs=None):
+ raise NotImplementedError
+
+ def prepare_model(self, attrs):
+ """
+ Create a model from a set of attributes.
+ """
+ if isinstance(attrs, Model):
+ attrs.client = self.client
+ attrs.collection = self
+ return attrs
+ elif isinstance(attrs, dict):
+ return self.model(attrs=attrs, client=self.client, collection=self)
+ else:
+ raise Exception("Can't create %s from %s" %
+ (self.model.__name__, attrs))
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
new file mode 100644
index 0000000..ca11ede
--- /dev/null
+++ b/docker/models/secrets.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Secret(Model):
+ """A secret."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this secret.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If secret failed to remove.
+ """
+ return self.client.api.remove_secret(self.id)
+
+
+class SecretCollection(Collection):
+ """Secrets on the Docker server."""
+ model = Secret
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_secret(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_secret.__doc__
+
+ def get(self, secret_id):
+ """
+ Get a secret.
+
+ Args:
+ secret_id (str): Secret ID.
+
+ Returns:
+ (:py:class:`Secret`): The secret.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the secret does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_secret(secret_id))
+
+ def list(self, **kwargs):
+ """
+ List secrets. Similar to the ``docker secret ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Secret`): The secrets.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.secrets(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/services.py b/docker/models/services.py
new file mode 100644
index 0000000..125896b
--- /dev/null
+++ b/docker/models/services.py
@@ -0,0 +1,352 @@
+import copy
+from docker.errors import create_unexpected_kwargs_error, InvalidArgument
+from docker.types import TaskTemplate, ContainerSpec, ServiceMode
+from .resource import Model, Collection
+
+
+class Service(Model):
+ """A service."""
+ id_attribute = 'ID'
+
+ @property
+ def name(self):
+ """The service's name."""
+ return self.attrs['Spec']['Name']
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def remove(self):
+ """
+ Stop and remove the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_service(self.id)
+
+ def tasks(self, filters=None):
+ """
+ List the tasks in this service.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``node``,
+ ``label``, and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if filters is None:
+ filters = {}
+ filters['service'] = self.id
+ return self.client.api.tasks(filters=filters)
+
+ def update(self, **kwargs):
+ """
+ Update a service's configuration. Similar to the ``docker service
+ update`` command.
+
+ Takes the same parameters as :py:meth:`~ServiceCollection.create`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ # Image is required, so if it hasn't been set, use current image
+ if 'image' not in kwargs:
+ spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ kwargs['image'] = spec['Image']
+
+ if kwargs.get('force_update') is True:
+ task_template = self.attrs['Spec']['TaskTemplate']
+ current_value = int(task_template.get('ForceUpdate', 0))
+ kwargs['force_update'] = current_value + 1
+
+ create_kwargs = _get_create_service_kwargs('update', kwargs)
+
+ return self.client.api.update_service(
+ self.id,
+ self.version,
+ **create_kwargs
+ )
+
+ def logs(self, **kwargs):
+ """
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the logs' starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ (generator): Logs for the service.
+ """
+ is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
+ 'TTY', False
+ )
+ return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
+
+ def scale(self, replicas):
+ """
+ Scale the service to the given number of replica containers.
+
+ Args:
+ replicas (int): The number of containers that should be running.
+
+ Returns:
+ ``True`` if successful.
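+
+ Example (illustrative):
+
+ >>> service.scale(3)
+ True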
+ """
+
+ if 'Global' in self.attrs['Spec']['Mode']:
+ raise InvalidArgument('Cannot scale a global service')
+
+ service_mode = ServiceMode('replicated', replicas)
+ return self.client.api.update_service(self.id, self.version,
+ mode=service_mode,
+ fetch_current_spec=True)
+
+ def force_update(self):
+ """
+ Force update the service even if no changes require it.
+
+ Returns:
+ ``True`` if successful.
+ """
+
+ return self.update(force_update=True, fetch_current_spec=True)
+
+
+class ServiceCollection(Collection):
+ """Services on the Docker server."""
+ model = Service
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a service. Similar to the ``docker service create`` command.
+
+ Args:
+ image (str): The image name to use for the containers.
+ command (list of str or str): Command to run.
+ args (list of str): Arguments to the command.
+ constraints (list of str): Placement constraints.
+ container_labels (dict): Labels to apply to the container.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+ env (list of str): Environment variables, in the form
+ ``KEY=val``.
+ hostname (string): Hostname to set on the container.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
+ labels (dict): Labels to apply to the service.
+ log_driver (str): Log driver to use for containers.
+ log_driver_options (dict): Log driver options.
+ mode (ServiceMode): Scheduling mode for the service.
+ Default: ``None``.
+ mounts (list of str): Mounts for the containers, in the form
+ ``source:target:options``, where options is either
+ ``ro`` or ``rw``.
+ name (str): Name to give to the service.
+ networks (list of str): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ resources (Resources): Resource limits and reservations.
+ restart_policy (RestartPolicy): Restart policy for containers.
+ secrets (list of :py:class:`docker.types.SecretReference`): List
+ of secrets accessible to containers for this service.
+ stop_grace_period (int): Amount of time to wait for
+ containers to terminate before forcefully killing them.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ user (str): User to run commands as.
+ workdir (str): Working directory for commands to run.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's `hosts` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference`
+ that will be exposed to the service.
+ privileges (Privileges): Security options for the service's
+ containers.
+
+ Returns:
+ (:py:class:`Service`): The created service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
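+
+ Example (a minimal sketch; assumes the Engine is part of a swarm):
+
+ >>> service = client.services.create(
+ 'alpine', 'ping docker.com', name='pinger'
+ )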
+ """
+ kwargs['image'] = image
+ kwargs['command'] = command
+ create_kwargs = _get_create_service_kwargs('create', kwargs)
+ service_id = self.client.api.create_service(**create_kwargs)
+ return self.get(service_id)
+
+ def get(self, service_id, insert_defaults=None):
+ """
+ Get a service.
+
+ Args:
+ service_id (str): The ID of the service.
+ insert_defaults (boolean): If true, default values will be merged
+ into the output.
+
+ Returns:
+ (:py:class:`Service`): The service.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the service does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.InvalidVersion`
+ If one of the arguments is not supported with the current
+ API version.
+ """
+ return self.prepare_model(
+ self.client.api.inspect_service(service_id, insert_defaults)
+ )
+
+ def list(self, **kwargs):
+ """
+ List services.
+
+ Args:
+ filters (dict): Filters to process on the services list. Valid
+ filters: ``id``, ``name``, ``label`` and ``mode``.
+ Default: ``None``.
+
+ Returns:
+ (list of :py:class:`Service`): The services.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return [
+ self.prepare_model(s)
+ for s in self.client.api.services(**kwargs)
+ ]
+
+
+# kwargs to copy straight over to ContainerSpec
+CONTAINER_SPEC_KWARGS = [
+ 'args',
+ 'command',
+ 'configs',
+ 'dns_config',
+ 'env',
+ 'groups',
+ 'healthcheck',
+ 'hostname',
+ 'hosts',
+ 'image',
+ 'isolation',
+ 'labels',
+ 'mounts',
+ 'open_stdin',
+ 'privileges',
+ 'read_only',
+ 'secrets',
+ 'stop_grace_period',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'workdir',
+]
+
+# kwargs to copy straight over to TaskTemplate
+TASK_TEMPLATE_KWARGS = [
+ 'networks',
+ 'resources',
+ 'restart_policy',
+]
+
+# kwargs to copy straight over to create_service
+CREATE_SERVICE_KWARGS = [
+ 'name',
+ 'labels',
+ 'mode',
+ 'update_config',
+ 'endpoint_spec',
+]
+
+
+def _get_create_service_kwargs(func_name, kwargs):
+ # Copy over things which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CREATE_SERVICE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ container_spec_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CONTAINER_SPEC_KWARGS:
+ container_spec_kwargs[key] = kwargs.pop(key)
+ task_template_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in TASK_TEMPLATE_KWARGS:
+ task_template_kwargs[key] = kwargs.pop(key)
+
+ if 'container_labels' in kwargs:
+ container_spec_kwargs['labels'] = kwargs.pop('container_labels')
+
+ if 'constraints' in kwargs:
+ task_template_kwargs['placement'] = {
+ 'Constraints': kwargs.pop('constraints')
+ }
+
+ if 'log_driver' in kwargs:
+ task_template_kwargs['log_driver'] = {
+ 'Name': kwargs.pop('log_driver'),
+ 'Options': kwargs.pop('log_driver_options', {})
+ }
+
+ if func_name == 'update':
+ if 'force_update' in kwargs:
+ task_template_kwargs['force_update'] = kwargs.pop('force_update')
+
+ # fetch the current spec by default if updating the service
+ # through the model
+ fetch_current_spec = kwargs.pop('fetch_current_spec', True)
+ create_kwargs['fetch_current_spec'] = fetch_current_spec
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error(func_name, kwargs)
+
+ container_spec = ContainerSpec(**container_spec_kwargs)
+ task_template_kwargs['container_spec'] = container_spec
+ create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
+ return create_kwargs
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
new file mode 100644
index 0000000..7396e73
--- /dev/null
+++ b/docker/models/swarm.py
@@ -0,0 +1,168 @@
+from docker.api import APIClient
+from docker.errors import APIError
+from .resource import Model
+
+
+class Swarm(Model):
+ """
+ The server's Swarm state. This is a singleton that must be reloaded to
+ get the current state of the Swarm.
+ """
+ id_attribute = 'ID'
+
+ def __init__(self, *args, **kwargs):
+ super(Swarm, self).__init__(*args, **kwargs)
+ if self.client:
+ try:
+ self.reload()
+ except APIError as e:
+ # FIXME: https://github.com/docker/docker/issues/29192
+ if e.response.status_code not in (406, 503):
+ raise
+
+ @property
+ def version(self):
+ """
+ The version number of the swarm. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def get_unlock_key(self):
+ return self.client.api.get_unlock_key()
+ get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
+
+ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, **kwargs):
+ """
+ Initialize a new swarm on this Engine.
+
+ Args:
+ advertise_addr (str): Externally reachable address advertised to
+ other nodes. This can either be an address/port combination in
+ the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used.
+
+ If not specified, it will be automatically detected when
+ possible.
+ listen_addr (str): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: ``0.0.0.0:2377``
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ task_history_retention_limit (int): Maximum number of task
+ history entries stored.
+ snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Number of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Number of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for node certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer whose purpose is to force swarm
+ to generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.swarm.init(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, snapshot_interval=5000,
+ log_entries_for_slow_followers=1200
+ )
+
+ """
+ init_kwargs = {
+ 'advertise_addr': advertise_addr,
+ 'listen_addr': listen_addr,
+ 'force_new_cluster': force_new_cluster
+ }
+ init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
+ self.client.api.init_swarm(**init_kwargs)
+ self.reload()
+
+ def join(self, *args, **kwargs):
+ return self.client.api.join_swarm(*args, **kwargs)
+ join.__doc__ = APIClient.join_swarm.__doc__
+
+ def leave(self, *args, **kwargs):
+ return self.client.api.leave_swarm(*args, **kwargs)
+ leave.__doc__ = APIClient.leave_swarm.__doc__
+
+ def reload(self):
+ """
+ Inspect the swarm on the server and store the response in
+ :py:attr:`attrs`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.attrs = self.client.api.inspect_swarm()
+
+ def unlock(self, key):
+ return self.client.api.unlock_swarm(key)
+ unlock.__doc__ = APIClient.unlock_swarm.__doc__
+
+ def update(self, rotate_worker_token=False, rotate_manager_token=False,
+ **kwargs):
+ """
+ Update the swarm's configuration.
+
+ It takes the same arguments as :py:meth:`init`, except
+ ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
+ addition, it takes these arguments:
+
+ Args:
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ # this seems to have to be set
+ if kwargs.get('node_cert_expiry') is None:
+ kwargs['node_cert_expiry'] = 7776000000000000
+
+ return self.client.api.update_swarm(
+ version=self.version,
+ swarm_spec=self.client.api.create_swarm_spec(**kwargs),
+ rotate_worker_token=rotate_worker_token,
+ rotate_manager_token=rotate_manager_token
+ )
diff --git a/docker/models/volumes.py b/docker/models/volumes.py
new file mode 100644
index 0000000..3c2e837
--- /dev/null
+++ b/docker/models/volumes.py
@@ -0,0 +1,99 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Volume(Model):
+ """A volume."""
+ id_attribute = 'Name'
+
+ @property
+ def name(self):
+ """The name of the volume."""
+ return self.attrs['Name']
+
+ def remove(self, force=False):
+ """
+ Remove this volume.
+
+ Args:
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
+ """
+ return self.client.api.remove_volume(self.id, force=force)
+
+
+class VolumeCollection(Collection):
+ """Volumes on the Docker server."""
+ model = Volume
+
+ def create(self, name=None, **kwargs):
+ """
+ Create a volume.
+
+ Args:
+ name (str): Name of the volume. If not specified, the engine
+ generates a name.
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (:py:class:`Volume`): The volume created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = client.volumes.create(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+
+ """
+ obj = self.client.api.create_volume(name, **kwargs)
+ return self.prepare_model(obj)
+
+ def get(self, volume_id):
+ """
+ Get a volume.
+
+ Args:
+ volume_id (str): Volume name.
+
+ Returns:
+ (:py:class:`Volume`): The volume.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the volume does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_volume(volume_id))
+
+ def list(self, **kwargs):
+ """
+ List volumes. Similar to the ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Volume`): The volumes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.volumes(**kwargs)
+ if not resp.get('Volumes'):
+ return []
+ return [self.prepare_model(obj) for obj in resp['Volumes']]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_volumes(filters=filters)
+ prune.__doc__ = APIClient.prune_volumes.__doc__
diff --git a/docker/ssladapter/__init__.py b/docker/ssladapter/__init__.py
deleted file mode 100644
index 1a5e1bb..0000000
--- a/docker/ssladapter/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .ssladapter import SSLAdapter # flake8: noqa
diff --git a/docker/tls.py b/docker/tls.py
index 7abfa60..4900e9f 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -2,10 +2,24 @@ import os
import ssl
from . import errors
-from .ssladapter import ssladapter
+from .transport import SSLAdapter
class TLSConfig(object):
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be ``False`` or a path to a CA cert
+ file.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
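+
+ Example (an illustrative sketch; paths and address are placeholders):
+
+ >>> tls_config = docker.tls.TLSConfig(
+ client_cert=('/path/to/cert.pem', '/path/to/key.pem'),
+ ca_cert='/path/to/ca.pem', verify=True
+ )
+ >>> client = docker.DockerClient(
+ base_url='tcp://192.0.2.10:2376', tls=tls_config
+ )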
+ """
cert = None
ca_cert = None
verify = None
@@ -23,13 +37,33 @@ class TLSConfig(object):
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
- # ways: https://github.com/docker/docker-py/issues/963
+ # TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is
+ # deprecated, and it's recommended to use OP_NO_TLSvWhatever instead
+ # to exclude versions. But I think that might require a bigger
+ # architectural change, so I've opted not to pursue it at this time
- self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
+ # If the user provides an SSL version, we should use their preference
+ if ssl_version:
+ self.ssl_version = ssl_version
+ else:
+ # If the user provides no ssl version, we should default to
+ # TLSv1_2. This option is the most secure, and will work for the
+ # majority of users with reasonably up-to-date software. However,
+ # before doing so, detect openssl version to ensure we can support
+ # it.
+ if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
+ ssl, 'PROTOCOL_TLSv1_2'):
+ # If the OpenSSL version is high enough to support TLSv1_2,
+ # then we should use it.
+ self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
+ else:
+ # Otherwise, TLS v1.0 seems to be the safest default;
+ # SSLv23 fails in mysterious ways:
+ # https://github.com/docker/docker-py/issues/963
+ self.ssl_version = ssl.PROTOCOL_TLSv1
- # "tls" and "tls_verify" must have both or neither cert/key files
- # In either case, Alert the user when both are expected, but any are
+ # "tls" and "tls_verify" must have both or neither cert/key files In
+ # either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
@@ -42,7 +76,7 @@ class TLSConfig(object):
)
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
- not os.path.isfile(tls_key)):
+ not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_config param'
@@ -58,6 +92,9 @@ class TLSConfig(object):
)
def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert:
@@ -68,7 +105,7 @@ class TLSConfig(object):
if self.cert:
client.cert = self.cert
- client.mount('https://', ssladapter.SSLAdapter(
+ client.mount('https://', SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
index 46dfdf8..abbee18 100644
--- a/docker/transport/__init__.py
+++ b/docker/transport/__init__.py
@@ -1,7 +1,8 @@
# flake8: noqa
from .unixconn import UnixAdapter
+from .ssladapter import SSLAdapter
try:
from .npipeconn import NpipeAdapter
from .npipesocket import NpipeSocket
except ImportError:
- pass \ No newline at end of file
+ pass
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index 017738e..ab9b904 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -69,12 +69,17 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class NpipeAdapter(requests.adapters.HTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
+ 'pools',
+ 'timeout']
+
def __init__(self, base_url, timeout=60,
- num_pools=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.pools = RecentlyUsedContainer(
- num_pools, dispose_func=lambda p: p.close()
+ pool_connections, dispose_func=lambda p: p.close()
)
super(NpipeAdapter, self).__init__()
@@ -96,7 +101,7 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
- # See also: https://github.com/docker/docker-py/issues/811
+ # See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
def close(self):
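
The pool_connections rename matches the keyword used by requests' HTTPAdapter. On Windows the adapter is selected automatically; a short sketch, assuming the default engine pipe:

import docker

# npipe:// base URLs route through NpipeAdapter transparently.
client = docker.APIClient(base_url='npipe:////./pipe/docker_engine')
print(client.version())
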
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index 3b1b644..c04b39d 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -29,6 +29,7 @@ class NpipeSocket(object):
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
+
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
diff --git a/docker/ssladapter/ssladapter.py b/docker/transport/ssladapter.py
index e17dfad..8fafec3 100644
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -24,6 +24,11 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5:
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
+
+ __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
+ 'assert_hostname',
+ 'ssl_version']
+
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
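
The relocated adapter can also be mounted on a plain requests session to pin a protocol version; a sketch, with the version chosen purely for illustration:

import ssl

import requests

from docker.transport import SSLAdapter

session = requests.Session()
# Every https:// request made through this session now uses the
# pinned protocol version.
session.mount('https://', SSLAdapter(ssl_version=ssl.PROTOCOL_TLSv1_2))
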
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index b7905a0..cc35d00 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -18,7 +18,19 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+class UnixHTTPResponse(httplib.HTTPResponse, object):
+ def __init__(self, sock, *args, **kwargs):
+ disable_buffering = kwargs.pop('disable_buffering', False)
+ if six.PY2:
+ # FIXME: We may need to disable buffering on Py3 as well,
+ # but there's no clear way to do it at the moment. See:
+ # https://github.com/docker/docker-py/issues/1799
+ kwargs['buffering'] = not disable_buffering
+ super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
+
+
class UnixHTTPConnection(httplib.HTTPConnection, object):
+
def __init__(self, base_url, unix_socket, timeout=60):
super(UnixHTTPConnection, self).__init__(
'localhost', timeout=timeout
@@ -26,6 +38,7 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
+ self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -33,6 +46,17 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
sock.connect(self.unix_socket)
self.sock = sock
+ def putheader(self, header, *values):
+ super(UnixHTTPConnection, self).putheader(header, *values)
+ if header == 'Connection' and 'Upgrade' in values:
+ self.disable_buffering = True
+
+ def response_class(self, sock, *args, **kwargs):
+ if self.disable_buffering:
+ kwargs['disable_buffering'] = True
+
+ return UnixHTTPResponse(sock, *args, **kwargs)
+
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
@@ -50,15 +74,20 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class UnixAdapter(requests.adapters.HTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
+ 'socket_path',
+ 'timeout']
+
def __init__(self, socket_url, timeout=60,
- num_pools=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.pools = RecentlyUsedContainer(
- num_pools, dispose_func=lambda p: p.close()
+ pool_connections, dispose_func=lambda p: p.close()
)
super(UnixAdapter, self).__init__()
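
The adapter also works on a standalone requests session; a sketch, assuming the default local engine socket:

import requests

from docker.transport import UnixAdapter

session = requests.Session()
# The adapter dials the socket path directly; the URL host is ignored.
session.mount('http+unix://', UnixAdapter('http+unix:///var/run/docker.sock'))
print(session.get('http+unix://localhost/version').json())
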
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 3609581..0b0d847 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,7 +1,11 @@
# flake8: noqa
-from .containers import LogConfig, Ulimit
+from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .daemon import CancellableStream
+from .healthcheck import Healthcheck
+from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
- ContainerSpec, DriverConfig, Mount, Resources, RestartPolicy, TaskTemplate,
- UpdateConfig
+ ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
+ Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
+ ServiceMode, TaskTemplate, UpdateConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
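
With the expanded export list, the new types can be imported directly from docker.types, for example:

from docker.types import (
    ContainerConfig, EndpointSpec, Healthcheck, HostConfig, IPAMConfig,
    IPAMPool, Placement, Privileges, ServiceMode,
)
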
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 40a44ca..2521420 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,6 +1,13 @@
import six
+from .. import errors
+from ..utils.utils import (
+ convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
+ format_environment, format_extra_hosts, normalize_links, parse_bytes,
+ parse_devices, split_command, version_gte, version_lt,
+)
from .base import DictType
+from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
@@ -90,3 +97,502 @@ class Ulimit(DictType):
@hard.setter
def hard(self, value):
self['Hard'] = value
+
+
+class HostConfig(dict):
+ def __init__(self, version, binds=None, port_bindings=None,
+ lxc_conf=None, publish_all_ports=False, links=None,
+ privileged=False, dns=None, dns_search=None,
+ volumes_from=None, network_mode=None, restart_policy=None,
+ cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_reservation=None,
+ kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
+ group_add=None, cpu_quota=None, cpu_period=None,
+ blkio_weight=None, blkio_weight_device=None,
+ device_read_bps=None, device_write_bps=None,
+ device_read_iops=None, device_write_iops=None,
+ oom_kill_disable=False, shm_size=None, sysctls=None,
+ tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
+ cpuset_cpus=None, userns_mode=None, pids_limit=None,
+ isolation=None, auto_remove=False, storage_opt=None,
+ init=None, init_path=None, volume_driver=None,
+ cpu_count=None, cpu_percent=None, nano_cpus=None,
+ cpuset_mems=None, runtime=None, mounts=None,
+ cpu_rt_period=None, cpu_rt_runtime=None,
+ device_cgroup_rules=None):
+
+ if mem_limit is not None:
+ self['Memory'] = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ self['MemorySwap'] = parse_bytes(memswap_limit)
+
+ if mem_reservation:
+ self['MemoryReservation'] = parse_bytes(mem_reservation)
+
+ if kernel_memory:
+ self['KernelMemory'] = parse_bytes(kernel_memory)
+
+ if mem_swappiness is not None:
+ if not isinstance(mem_swappiness, int):
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
+ )
+
+ self['MemorySwappiness'] = mem_swappiness
+
+ if shm_size is not None:
+ if isinstance(shm_size, six.string_types):
+ shm_size = parse_bytes(shm_size)
+
+ self['ShmSize'] = shm_size
+
+ if pid_mode:
+ if version_lt(version, '1.24') and pid_mode != 'host':
+ raise host_config_value_error('pid_mode', pid_mode)
+ self['PidMode'] = pid_mode
+
+ if ipc_mode:
+ self['IpcMode'] = ipc_mode
+
+ if privileged:
+ self['Privileged'] = privileged
+
+ if oom_kill_disable:
+ self['OomKillDisable'] = oom_kill_disable
+
+ if oom_score_adj:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('oom_score_adj', '1.22')
+ if not isinstance(oom_score_adj, int):
+ raise host_config_type_error(
+ 'oom_score_adj', oom_score_adj, 'int'
+ )
+ self['OomScoreAdj'] = oom_score_adj
+
+ if publish_all_ports:
+ self['PublishAllPorts'] = publish_all_ports
+
+ if read_only is not None:
+ self['ReadonlyRootfs'] = read_only
+
+ if dns_search:
+ self['DnsSearch'] = dns_search
+
+ if network_mode:
+ self['NetworkMode'] = network_mode
+ elif network_mode is None:
+ self['NetworkMode'] = 'default'
+
+ if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
+ self['RestartPolicy'] = restart_policy
+
+ if cap_add:
+ self['CapAdd'] = cap_add
+
+ if cap_drop:
+ self['CapDrop'] = cap_drop
+
+ if devices:
+ self['Devices'] = parse_devices(devices)
+
+ if group_add:
+ self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+
+ if dns is not None:
+ self['Dns'] = dns
+
+ if dns_opt is not None:
+ self['DnsOptions'] = dns_opt
+
+ if security_opt is not None:
+ if not isinstance(security_opt, list):
+ raise host_config_type_error(
+ 'security_opt', security_opt, 'list'
+ )
+
+ self['SecurityOpt'] = security_opt
+
+ if sysctls:
+ if not isinstance(sysctls, dict):
+ raise host_config_type_error('sysctls', sysctls, 'dict')
+ self['Sysctls'] = {}
+ for k, v in six.iteritems(sysctls):
+ self['Sysctls'][k] = six.text_type(v)
+
+ if volumes_from is not None:
+ if isinstance(volumes_from, six.string_types):
+ volumes_from = volumes_from.split(',')
+
+ self['VolumesFrom'] = volumes_from
+
+ if binds is not None:
+ self['Binds'] = convert_volume_binds(binds)
+
+ if port_bindings is not None:
+ self['PortBindings'] = convert_port_bindings(port_bindings)
+
+ if extra_hosts is not None:
+ if isinstance(extra_hosts, dict):
+ extra_hosts = format_extra_hosts(extra_hosts)
+
+ self['ExtraHosts'] = extra_hosts
+
+ if links is not None:
+ self['Links'] = normalize_links(links)
+
+ if isinstance(lxc_conf, dict):
+ formatted = []
+ for k, v in six.iteritems(lxc_conf):
+ formatted.append({'Key': k, 'Value': str(v)})
+ lxc_conf = formatted
+
+ if lxc_conf is not None:
+ self['LxcConf'] = lxc_conf
+
+ if cgroup_parent is not None:
+ self['CgroupParent'] = cgroup_parent
+
+ if ulimits is not None:
+ if not isinstance(ulimits, list):
+ raise host_config_type_error('ulimits', ulimits, 'list')
+ self['Ulimits'] = []
+ for l in ulimits:
+ if not isinstance(l, Ulimit):
+ l = Ulimit(**l)
+ self['Ulimits'].append(l)
+
+ if log_config is not None:
+ if not isinstance(log_config, LogConfig):
+ if not isinstance(log_config, dict):
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
+ )
+ log_config = LogConfig(**log_config)
+
+ self['LogConfig'] = log_config
+
+ if cpu_quota:
+ if not isinstance(cpu_quota, int):
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
+ self['CpuQuota'] = cpu_quota
+
+ if cpu_period:
+ if not isinstance(cpu_period, int):
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
+ self['CpuPeriod'] = cpu_period
+
+ if cpu_shares:
+ if not isinstance(cpu_shares, int):
+ raise host_config_type_error('cpu_shares', cpu_shares, 'int')
+
+ self['CpuShares'] = cpu_shares
+
+ if cpuset_cpus:
+ self['CpusetCpus'] = cpuset_cpus
+
+ if cpuset_mems:
+ if not isinstance(cpuset_mems, str):
+ raise host_config_type_error(
+ 'cpuset_mems', cpuset_mems, 'str'
+ )
+ self['CpusetMems'] = cpuset_mems
+
+ if cpu_rt_period:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_period', '1.25')
+
+ if not isinstance(cpu_rt_period, int):
+ raise host_config_type_error(
+ 'cpu_rt_period', cpu_rt_period, 'int'
+ )
+ self['CPURealtimePeriod'] = cpu_rt_period
+
+ if cpu_rt_runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_runtime', '1.25')
+
+ if not isinstance(cpu_rt_runtime, int):
+ raise host_config_type_error(
+ 'cpu_rt_runtime', cpu_rt_runtime, 'int'
+ )
+ self['CPURealtimeRuntime'] = cpu_rt_runtime
+
+ if blkio_weight:
+ if not isinstance(blkio_weight, int):
+ raise host_config_type_error(
+ 'blkio_weight', blkio_weight, 'int'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight', '1.22')
+ self["BlkioWeight"] = blkio_weight
+
+ if blkio_weight_device:
+ if not isinstance(blkio_weight_device, list):
+ raise host_config_type_error(
+ 'blkio_weight_device', blkio_weight_device, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight_device', '1.22')
+ self["BlkioWeightDevice"] = blkio_weight_device
+
+ if device_read_bps:
+ if not isinstance(device_read_bps, list):
+ raise host_config_type_error(
+ 'device_read_bps', device_read_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_bps', '1.22')
+ self["BlkioDeviceReadBps"] = device_read_bps
+
+ if device_write_bps:
+ if not isinstance(device_write_bps, list):
+ raise host_config_type_error(
+ 'device_write_bps', device_write_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_bps', '1.22')
+ self["BlkioDeviceWriteBps"] = device_write_bps
+
+ if device_read_iops:
+ if not isinstance(device_read_iops, list):
+ raise host_config_type_error(
+ 'device_read_iops', device_read_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_iops', '1.22')
+ self["BlkioDeviceReadIOps"] = device_read_iops
+
+ if device_write_iops:
+ if not isinstance(device_write_iops, list):
+ raise host_config_type_error(
+ 'device_write_iops', device_write_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_iops', '1.22')
+ self["BlkioDeviceWriteIOps"] = device_write_iops
+
+ if tmpfs:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('tmpfs', '1.22')
+ self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
+
+ if userns_mode:
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('userns_mode', '1.23')
+
+ if userns_mode != "host":
+ raise host_config_value_error("userns_mode", userns_mode)
+ self['UsernsMode'] = userns_mode
+
+ if pids_limit:
+ if not isinstance(pids_limit, int):
+ raise host_config_type_error('pids_limit', pids_limit, 'int')
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('pids_limit', '1.23')
+ self["PidsLimit"] = pids_limit
+
+ if isolation:
+ if not isinstance(isolation, six.string_types):
+ raise host_config_type_error('isolation', isolation, 'string')
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('isolation', '1.24')
+ self['Isolation'] = isolation
+
+ if auto_remove:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('auto_remove', '1.25')
+ self['AutoRemove'] = auto_remove
+
+ if storage_opt is not None:
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('storage_opt', '1.24')
+ self['StorageOpt'] = storage_opt
+
+ if init is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init', '1.25')
+ self['Init'] = init
+
+ if init_path is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init_path', '1.25')
+
+ if version_gte(version, '1.29'):
+ # https://github.com/moby/moby/pull/32470
+ raise host_config_version_error('init_path', '1.29', False)
+ self['InitPath'] = init_path
+
+ if volume_driver is not None:
+ self['VolumeDriver'] = volume_driver
+
+ if cpu_count:
+ if not isinstance(cpu_count, int):
+ raise host_config_type_error('cpu_count', cpu_count, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_count', '1.25')
+
+ self['CpuCount'] = cpu_count
+
+ if cpu_percent:
+ if not isinstance(cpu_percent, int):
+ raise host_config_type_error('cpu_percent', cpu_percent, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_percent', '1.25')
+
+ self['CpuPercent'] = cpu_percent
+
+ if nano_cpus:
+ if not isinstance(nano_cpus, six.integer_types):
+ raise host_config_type_error('nano_cpus', nano_cpus, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('nano_cpus', '1.25')
+
+ self['NanoCpus'] = nano_cpus
+
+ if runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('runtime', '1.25')
+ self['Runtime'] = runtime
+
+ if mounts is not None:
+ if version_lt(version, '1.30'):
+ raise host_config_version_error('mounts', '1.30')
+ self['Mounts'] = mounts
+
+ if device_cgroup_rules is not None:
+ if version_lt(version, '1.28'):
+ raise host_config_version_error('device_cgroup_rules', '1.28')
+ if not isinstance(device_cgroup_rules, list):
+ raise host_config_type_error(
+ 'device_cgroup_rules', device_cgroup_rules, 'list'
+ )
+ self['DeviceCgroupRules'] = device_cgroup_rules
+
+
+def host_config_type_error(param, param_value, expected):
+ error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
+ return TypeError(error_msg.format(param, expected, type(param_value)))
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ error_msg = '{0} param is not supported in API versions {1} {2}'
+ return errors.InvalidVersion(error_msg.format(param, operator, version))
+
+
+def host_config_value_error(param, param_value):
+ error_msg = 'Invalid value for {0} param: {1}'
+ return ValueError(error_msg.format(param, param_value))
+
+
+class ContainerConfig(dict):
+ def __init__(
+ self, version, image, command, hostname=None, user=None, detach=False,
+ stdin_open=False, tty=False, ports=None, environment=None,
+ volumes=None, network_disabled=False, entrypoint=None,
+ working_dir=None, domainname=None, host_config=None, mac_address=None,
+ labels=None, stop_signal=None, networking_config=None,
+ healthcheck=None, stop_timeout=None, runtime=None
+ ):
+
+ if stop_timeout is not None and version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'stop_timeout was only introduced in API version 1.25'
+ )
+
+ if healthcheck is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'Health options were only introduced in API version 1.24'
+ )
+
+ if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
+ raise errors.InvalidVersion(
+ 'healthcheck start period was introduced in API '
+ 'version 1.29'
+ )
+
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+
+ if isinstance(entrypoint, six.string_types):
+ entrypoint = split_command(entrypoint)
+
+ if isinstance(environment, dict):
+ environment = format_environment(environment)
+
+ if isinstance(labels, list):
+ labels = dict((lbl, six.text_type('')) for lbl in labels)
+
+ if isinstance(ports, list):
+ exposed_ports = {}
+ for port_definition in ports:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ ports = exposed_ports
+
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
+ if isinstance(volumes, list):
+ volumes_dict = {}
+ for vol in volumes:
+ volumes_dict[vol] = {}
+ volumes = volumes_dict
+
+ if healthcheck and isinstance(healthcheck, dict):
+ healthcheck = Healthcheck(**healthcheck)
+
+ attach_stdin = False
+ attach_stdout = False
+ attach_stderr = False
+ stdin_once = False
+
+ if not detach:
+ attach_stdout = True
+ attach_stderr = True
+
+ if stdin_open:
+ attach_stdin = True
+ stdin_once = True
+
+ self.update({
+ 'Hostname': hostname,
+ 'Domainname': domainname,
+ 'ExposedPorts': ports,
+ 'User': six.text_type(user) if user else None,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'AttachStdin': attach_stdin,
+ 'AttachStdout': attach_stdout,
+ 'AttachStderr': attach_stderr,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'WorkingDir': working_dir,
+ 'HostConfig': host_config,
+ 'NetworkingConfig': networking_config,
+ 'MacAddress': mac_address,
+ 'Labels': labels,
+ 'StopSignal': stop_signal,
+ 'Healthcheck': healthcheck,
+ 'StopTimeout': stop_timeout,
+ 'Runtime': runtime
+ })
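
Both dictionaries are normally built through APIClient helpers such as create_host_config, but they can be constructed directly; a sketch with illustrative values, where version is the negotiated API version:

from docker.types import HostConfig

host_config = HostConfig(
    version='1.30',
    mem_limit='1g',  # parsed via parse_bytes
    restart_policy={'Name': 'on-failure', 'MaximumRetryCount': 5},
    cap_add=['NET_ADMIN'],
)
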
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
new file mode 100644
index 0000000..852f3d8
--- /dev/null
+++ b/docker/types/daemon.py
@@ -0,0 +1,62 @@
+import socket
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+
+class CancellableStream(object):
+ """
+ Stream wrapper for real-time events, logs, etc. from the server.
+
+ Example:
+ >>> events = client.events()
+ >>> for event in events:
+ ... print event
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ def __init__(self, stream, response):
+ self._stream = stream
+ self._response = response
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self._stream)
+ except urllib3.exceptions.ProtocolError:
+ raise StopIteration
+ except socket.error:
+ raise StopIteration
+
+ next = __next__
+
+ def close(self):
+ """
+ Closes the event streaming.
+ """
+
+ if not self._response.raw.closed:
+ # find the underlying socket object
+ # based on api.client._get_raw_response_socket
+
+ sock_fp = self._response.raw._fp.fp
+
+ if hasattr(sock_fp, 'raw'):
+ sock_raw = sock_fp.raw
+
+ if hasattr(sock_raw, 'sock'):
+ sock = sock_raw.sock
+
+ elif hasattr(sock_raw, '_sock'):
+ sock = sock_raw._sock
+
+ else:
+ sock = sock_fp._sock
+
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
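
A sketch of the cancellation pattern from the docstring, consuming the stream on a worker thread (client is assumed to be a connected APIClient):

import threading
import time

events = client.events(decode=True)  # returns a CancellableStream

worker = threading.Thread(target=lambda: [print(e) for e in events])
worker.start()

time.sleep(10)
events.close()  # shuts the socket down, unblocking the worker's iterator
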
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
new file mode 100644
index 0000000..61857c2
--- /dev/null
+++ b/docker/types/healthcheck.py
@@ -0,0 +1,88 @@
+from .base import DictType
+
+import six
+
+
+class Healthcheck(DictType):
+ """
+ Defines a healthcheck configuration for a container or service.
+
+ Args:
+ test (:py:class:`list` or str): Test to perform to determine
+ container health. Possible values:
+
+ - Empty list: Inherit healthcheck from parent image
+ - ``["NONE"]``: Disable healthcheck
+ - ``["CMD", args...]``: exec arguments directly.
+ - ``["CMD-SHELL", command]``: RUn command in the system's
+ default shell.
+
+ If a string is provided, it will be used as a ``CMD-SHELL``
+ command.
+ interval (int): The time to wait between checks in nanoseconds. It
+ should be 0 or at least 1000000 (1 ms).
+ timeout (int): The time to wait before considering the check to
+ have hung. It should be 0 or at least 1000000 (1 ms).
+ retries (integer): The number of consecutive failures needed to
+ consider a container as unhealthy.
+ start_period (integer): Start period for the container to
+ initialize before starting health-retries countdown in
+ nanoseconds. It should be 0 or at least 1000000 (1 ms).
+ """
+ def __init__(self, **kwargs):
+ test = kwargs.get('test', kwargs.get('Test'))
+ if isinstance(test, six.string_types):
+ test = ["CMD-SHELL", test]
+
+ interval = kwargs.get('interval', kwargs.get('Interval'))
+ timeout = kwargs.get('timeout', kwargs.get('Timeout'))
+ retries = kwargs.get('retries', kwargs.get('Retries'))
+ start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
+
+ super(Healthcheck, self).__init__({
+ 'Test': test,
+ 'Interval': interval,
+ 'Timeout': timeout,
+ 'Retries': retries,
+ 'StartPeriod': start_period
+ })
+
+ @property
+ def test(self):
+ return self['Test']
+
+ @test.setter
+ def test(self, value):
+ self['Test'] = value
+
+ @property
+ def interval(self):
+ return self['Interval']
+
+ @interval.setter
+ def interval(self, value):
+ self['Interval'] = value
+
+ @property
+ def timeout(self):
+ return self['Timeout']
+
+ @timeout.setter
+ def timeout(self, value):
+ self['Timeout'] = value
+
+ @property
+ def retries(self):
+ return self['Retries']
+
+ @retries.setter
+ def retries(self, value):
+ self['Retries'] = value
+
+ @property
+ def start_period(self):
+ return self['StartPeriod']
+
+ @start_period.setter
+ def start_period(self, value):
+ self['StartPeriod'] = value
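
Since every duration above is expressed in nanoseconds, a small helper constant keeps values readable; a sketch with illustrative timings:

from docker.types import Healthcheck

SECOND = 10 ** 9  # nanoseconds

healthcheck = Healthcheck(
    test='curl -f http://localhost/ || exit 1',  # wrapped as CMD-SHELL
    interval=30 * SECOND,
    timeout=10 * SECOND,
    retries=3,
    start_period=5 * SECOND,
)
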
diff --git a/docker/types/networks.py b/docker/types/networks.py
new file mode 100644
index 0000000..1c7b2c9
--- /dev/null
+++ b/docker/types/networks.py
@@ -0,0 +1,111 @@
+from .. import errors
+from ..utils import normalize_links, version_lt
+
+
+class EndpointConfig(dict):
+ def __init__(self, version, aliases=None, links=None, ipv4_address=None,
+ ipv6_address=None, link_local_ips=None):
+ if version_lt(version, '1.22'):
+ raise errors.InvalidVersion(
+ 'Endpoint config is not supported for API version < 1.22'
+ )
+
+ if aliases:
+ self["Aliases"] = aliases
+
+ if links:
+ self["Links"] = normalize_links(links)
+
+ ipam_config = {}
+ if ipv4_address:
+ ipam_config['IPv4Address'] = ipv4_address
+
+ if ipv6_address:
+ ipam_config['IPv6Address'] = ipv6_address
+
+ if link_local_ips is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'link_local_ips is not supported for API version < 1.24'
+ )
+ ipam_config['LinkLocalIPs'] = link_local_ips
+
+ if ipam_config:
+ self['IPAMConfig'] = ipam_config
+
+
+class NetworkingConfig(dict):
+ def __init__(self, endpoints_config=None):
+ if endpoints_config:
+ self["EndpointsConfig"] = endpoints_config
+
+
+class IPAMConfig(dict):
+ """
+ Create an IPAM (IP Address Management) config dictionary to be used with
+ :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
+
+ Args:
+
+ driver (str): The IPAM driver to use. Defaults to ``default``.
+ pool_configs (:py:class:`list`): A list of pool configurations
+ (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
+ options (dict): Driver options as a key-value dictionary.
+ Defaults to ``None``.
+
+ Example:
+
+ >>> ipam_config = docker.types.IPAMConfig(driver='default')
+ >>> network = client.create_network('network1', ipam=ipam_config)
+
+ """
+ def __init__(self, driver='default', pool_configs=None, options=None):
+ self.update({
+ 'Driver': driver,
+ 'Config': pool_configs or []
+ })
+
+ if options:
+ if not isinstance(options, dict):
+ raise TypeError('IPAMConfig options must be a dictionary')
+ self['Options'] = options
+
+
+class IPAMPool(dict):
+ """
+ Create an IPAM pool config dictionary to be added to the
+ ``pool_configs`` parameter of
+ :py:class:`~docker.types.IPAMConfig`.
+
+ Args:
+
+ subnet (str): Custom subnet for this IPAM pool using the CIDR
+ notation. Defaults to ``None``.
+ iprange (str): Custom IP range for endpoints in this IPAM pool using
+ the CIDR notation. Defaults to ``None``.
+ gateway (str): Custom IP address for the pool's gateway.
+ aux_addresses (dict): A dictionary of ``key -> ip_address``
+ relationships specifying auxiliary addresses that need to be
+ allocated by the IPAM driver.
+
+ Example:
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='124.42.0.0/16',
+ iprange='124.42.0.0/24',
+ gateway='124.42.0.254',
+ aux_addresses={
+ 'reserved1': '124.42.1.1'
+ }
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool])
+ """
+ def __init__(self, subnet=None, iprange=None, gateway=None,
+ aux_addresses=None):
+ self.update({
+ 'Subnet': subnet,
+ 'IPRange': iprange,
+ 'Gateway': gateway,
+ 'AuxiliaryAddresses': aux_addresses
+ })
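
These types are usually produced via APIClient helpers; a sketch of attaching a new container to a network with a static address (names and addresses are placeholders, client is a connected APIClient):

networking_config = client.create_networking_config({
    'network1': client.create_endpoint_config(
        ipv4_address='124.42.0.5',
        aliases=['web'],
    )
})
container = client.create_container(
    'busybox', 'true', networking_config=networking_config
)
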
diff --git a/docker/types/services.py b/docker/types/services.py
index 063779c..09eb05e 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,20 +1,57 @@
import six
from .. import errors
+from ..constants import IS_WINDOWS_PLATFORM
+from ..utils import (
+ check_resource, format_environment, format_extra_hosts, parse_bytes,
+ split_command, convert_service_networks,
+)
class TaskTemplate(dict):
+ """
+ Describe the task specification to be used when creating or updating a
+ service.
+
+ Args:
+
+ container_spec (ContainerSpec): Container settings for containers
+ started as part of this task.
+ log_driver (DriverConfig): Log configuration for containers created as
+ part of the service.
+ resources (Resources): Resource requirements which apply to each
+ individual container created as part of the service.
+ restart_policy (RestartPolicy): Specification for the restart policy
+ which applies to containers created as part of this service.
+ placement (Placement): Placement instructions for the scheduler.
+ If a list is passed instead, it is assumed to be a list of
+ constraints as part of a :py:class:`Placement` object.
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the containers to.
+ force_update (int): A counter that triggers an update even if no
+ relevant parameters have been changed.
+ """
def __init__(self, container_spec, resources=None, restart_policy=None,
- placement=None, log_driver=None):
+ placement=None, log_driver=None, networks=None,
+ force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
if restart_policy:
self['RestartPolicy'] = restart_policy
if placement:
+ if isinstance(placement, list):
+ placement = Placement(constraints=placement)
self['Placement'] = placement
if log_driver:
self['LogDriver'] = log_driver
+ if networks:
+ self['Networks'] = convert_service_networks(networks)
+
+ if force_update is not None:
+ if not isinstance(force_update, int):
+ raise TypeError('force_update must be an integer')
+ self['ForceUpdate'] = force_update
@property
def container_spec(self):
@@ -34,10 +71,52 @@ class TaskTemplate(dict):
class ContainerSpec(dict):
- def __init__(self, image, command=None, args=None, env=None, workdir=None,
- user=None, labels=None, mounts=None, stop_grace_period=None):
- from ..utils import split_command # FIXME: circular import
+ """
+ Describes the behavior of containers that are part of a task, and is used
+ when declaring a :py:class:`~docker.types.TaskTemplate`.
+
+ Args:
+
+ image (string): The image name to use for the container.
+ command (string or list): The command to be run in the image.
+ args (:py:class:`list`): Arguments to the command.
+ hostname (string): The hostname to set on the container.
+ env (dict): Environment variables.
+ workdir (string): The working directory for commands to run in.
+ user (string): The user inside the container.
+ labels (dict): A map of labels to associate with the service.
+ mounts (:py:class:`list`): A list of specifications for mounts to be
+ added to containers created as part of the service. See the
+ :py:class:`~docker.types.Mount` class for details.
+ stop_grace_period (int): Amount of time to wait for the container to
+ terminate before forcefully killing it.
+ secrets (:py:class:`list`): List of :py:class:`SecretReference` to be
+ made available inside the containers.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's ``hosts`` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference` that
+ will be exposed to the service.
+ privileges (Privileges): Security options for the service's containers.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
+ """
+ def __init__(self, image, command=None, args=None, hostname=None, env=None,
+ workdir=None, user=None, labels=None, mounts=None,
+ stop_grace_period=None, secrets=None, tty=None, groups=None,
+ open_stdin=None, read_only=None, stop_signal=None,
+ healthcheck=None, hosts=None, dns_config=None, configs=None,
+ privileges=None, isolation=None):
self['Image'] = image
if isinstance(command, six.string_types):
@@ -45,81 +124,195 @@ class ContainerSpec(dict):
self['Command'] = command
self['Args'] = args
+ if hostname is not None:
+ self['Hostname'] = hostname
if env is not None:
- self['Env'] = env
+ if isinstance(env, dict):
+ self['Env'] = format_environment(env)
+ else:
+ self['Env'] = env
if workdir is not None:
self['Dir'] = workdir
if user is not None:
self['User'] = user
+ if groups is not None:
+ self['Groups'] = groups
+ if stop_signal is not None:
+ self['StopSignal'] = stop_signal
+ if stop_grace_period is not None:
+ self['StopGracePeriod'] = stop_grace_period
if labels is not None:
self['Labels'] = labels
+ if hosts is not None:
+ self['Hosts'] = format_extra_hosts(hosts, task=True)
+
if mounts is not None:
+ parsed_mounts = []
for mount in mounts:
if isinstance(mount, six.string_types):
- mounts.append(Mount.parse_mount_string(mount))
- mounts.remove(mount)
- self['Mounts'] = mounts
- if stop_grace_period is not None:
- self['StopGracePeriod'] = stop_grace_period
+ parsed_mounts.append(Mount.parse_mount_string(mount))
+ else:
+ # The mount is already parsed; pass it through as-is
+ parsed_mounts.append(mount)
+ self['Mounts'] = parsed_mounts
+
+ if secrets is not None:
+ if not isinstance(secrets, list):
+ raise TypeError('secrets must be a list')
+ self['Secrets'] = secrets
+
+ if configs is not None:
+ if not isinstance(configs, list):
+ raise TypeError('configs must be a list')
+ self['Configs'] = configs
+
+ if dns_config is not None:
+ self['DNSConfig'] = dns_config
+ if privileges is not None:
+ self['Privileges'] = privileges
+ if healthcheck is not None:
+ self['Healthcheck'] = healthcheck
+
+ if tty is not None:
+ self['TTY'] = tty
+ if open_stdin is not None:
+ self['OpenStdin'] = open_stdin
+ if read_only is not None:
+ self['ReadOnly'] = read_only
+
+ if isolation is not None:
+ self['Isolation'] = isolation
class Mount(dict):
+ """
+ Describes a mounted folder's configuration inside a container. A list of
+ :py:class:`Mount` would be used as part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ target (string): Container path.
+ source (string): Mount source (e.g. a volume name or a host path).
+ type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
+ ``npipe``). Default: ``volume``.
+ read_only (bool): Whether the mount should be read-only.
+ consistency (string): The consistency requirement for the mount. One of
+ ``default``, ``consistent``, ``cached``, ``delegated``.
+ propagation (string): A propagation mode with the value ``[r]private``,
+ ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
+ no_copy (bool): False if the volume should be populated with the data
+ from the target. Default: ``False``. Only valid for the ``volume``
+ type.
+ labels (dict): User-defined name and labels for the volume. Only valid
+ for the ``volume`` type.
+ driver_config (DriverConfig): Volume driver configuration. Only valid
+ for the ``volume`` type.
+ tmpfs_size (int or string): The size for the tmpfs mount in bytes.
+ tmpfs_mode (int): The permission mode for the tmpfs mount.
+ """
def __init__(self, target, source, type='volume', read_only=False,
- propagation=None, no_copy=False, labels=None,
- driver_config=None):
+ consistency=None, propagation=None, no_copy=False,
+ labels=None, driver_config=None, tmpfs_size=None,
+ tmpfs_mode=None):
self['Target'] = target
self['Source'] = source
- if type not in ('bind', 'volume'):
- raise errors.DockerError(
- 'Only acceptable mount types are `bind` and `volume`.'
+ if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
+ raise errors.InvalidArgument(
+ 'Unsupported mount type: "{}"'.format(type)
)
self['Type'] = type
+ self['ReadOnly'] = read_only
+
+ if consistency:
+ self['Consistency'] = consistency
if type == 'bind':
if propagation is not None:
self['BindOptions'] = {
'Propagation': propagation
}
- if any([labels, driver_config, no_copy]):
- raise errors.DockerError(
- 'Mount type is binding but volume options have been '
- 'provided.'
+ if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the bind '
+ 'type mount.'
)
- else:
+ elif type == 'volume':
volume_opts = {}
if no_copy:
volume_opts['NoCopy'] = True
if labels:
volume_opts['Labels'] = labels
if driver_config:
- volume_opts['driver_config'] = driver_config
+ volume_opts['DriverConfig'] = driver_config
if volume_opts:
self['VolumeOptions'] = volume_opts
- if propagation:
- raise errors.DockerError(
- 'Mount type is volume but `propagation` argument has been '
- 'provided.'
+ if any([propagation, tmpfs_size, tmpfs_mode]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the volume '
+ 'type mount.'
+ )
+ elif type == 'tmpfs':
+ tmpfs_opts = {}
+ if tmpfs_mode:
+ if not isinstance(tmpfs_mode, six.integer_types):
+ raise errors.InvalidArgument(
+ 'tmpfs_mode must be an integer'
+ )
+ tmpfs_opts['Mode'] = tmpfs_mode
+ if tmpfs_size:
+ tmpfs_opts['SizeBytes'] = parse_bytes(tmpfs_size)
+ if tmpfs_opts:
+ self['TmpfsOptions'] = tmpfs_opts
+ if any([propagation, labels, driver_config, no_copy]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the tmpfs '
+ 'type mount.'
)
@classmethod
def parse_mount_string(cls, string):
parts = string.split(':')
if len(parts) > 3:
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'Invalid mount format "{0}"'.format(string)
)
if len(parts) == 1:
- return cls(target=parts[0])
+ return cls(target=parts[0], source=None)
else:
target = parts[1]
source = parts[0]
- read_only = not (len(parts) == 3 or parts[2] == 'ro')
- return cls(target, source, read_only=read_only)
+ mount_type = 'volume'
+ if source.startswith('/') or (
+ IS_WINDOWS_PLATFORM and source[0].isalpha() and
+ source[1] == ':'
+ ):
+ # FIXME: That windows condition will fail earlier since we
+ # split on ':'. We should look into doing a smarter split
+ # if we detect we are on Windows.
+ mount_type = 'bind'
+ read_only = not (len(parts) == 2 or parts[2] == 'rw')
+ return cls(target, source, read_only=read_only, type=mount_type)
class Resources(dict):
+ """
+ Configures resource allocation for containers when made part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ cpu_limit (int): CPU limit in units of 10^9 CPU shares.
+ mem_limit (int): Memory limit in Bytes.
+ cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
+ mem_reservation (int): Memory reservation in Bytes.
+ generic_resources (dict or :py:class:`list`): Node level generic
+ resources, for example a GPU, using the following format:
+ ``{ resource_name: resource_value }``. Alternatively, a list of
+ of resource specifications as defined by the Engine API.
+ """
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
- mem_reservation=None):
+ mem_reservation=None, generic_resources=None):
limits = {}
reservation = {}
if cpu_limit is not None:
@@ -130,24 +323,95 @@ class Resources(dict):
reservation['NanoCPUs'] = cpu_reservation
if mem_reservation is not None:
reservation['MemoryBytes'] = mem_reservation
-
+ if generic_resources is not None:
+ reservation['GenericResources'] = (
+ _convert_generic_resources_dict(generic_resources)
+ )
if limits:
self['Limits'] = limits
if reservation:
self['Reservations'] = reservation
+def _convert_generic_resources_dict(generic_resources):
+ if isinstance(generic_resources, list):
+ return generic_resources
+ if not isinstance(generic_resources, dict):
+ raise errors.InvalidArgument(
+ 'generic_resources must be a dict or a list'
+ ' (found {})'.format(type(generic_resources))
+ )
+ resources = []
+ for kind, value in six.iteritems(generic_resources):
+ resource_type = None
+ if isinstance(value, int):
+ resource_type = 'DiscreteResourceSpec'
+ elif isinstance(value, str):
+ resource_type = 'NamedResourceSpec'
+ else:
+ raise errors.InvalidArgument(
+ 'Unsupported generic resource reservation '
+ 'type: {}'.format({kind: value})
+ )
+ resources.append({
+ resource_type: {'Kind': kind, 'Value': value}
+ })
+ return resources
+
+
class UpdateConfig(dict):
- def __init__(self, parallelism=0, delay=None, failure_action='continue'):
+ """
+
+ Used to specify the way container updates should be performed by a service.
+
+ Args:
+
+ parallelism (int): Maximum number of tasks to be updated in one
+ iteration (0 means unlimited parallelism). Default: 0.
+ delay (int): Amount of time between updates.
+ failure_action (string): Action to take if an updated task fails to
+ run, or stops running during the update. Acceptable values are
+ ``continue`` and ``pause``. Default: ``continue``
+ monitor (int): Amount of time to monitor each updated task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ an update before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+ order (string): Specifies the order of operations when rolling out an
+ updated task. Either ``start-first`` or ``stop-first`` is accepted.
+ """
+ def __init__(self, parallelism=0, delay=None, failure_action='continue',
+ monitor=None, max_failure_ratio=None, order=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
if failure_action not in ('pause', 'continue'):
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'failure_action must be either `pause` or `continue`.'
)
self['FailureAction'] = failure_action
+ if monitor is not None:
+ if not isinstance(monitor, int):
+ raise TypeError('monitor must be an integer')
+ self['Monitor'] = monitor
+
+ if max_failure_ratio is not None:
+ if not isinstance(max_failure_ratio, (float, int)):
+ raise TypeError('max_failure_ratio must be a float')
+ if max_failure_ratio > 1 or max_failure_ratio < 0:
+ raise errors.InvalidArgument(
+ 'max_failure_ratio must be a number between 0 and 1'
+ )
+ self['MaxFailureRatio'] = max_failure_ratio
+
+ if order is not None:
+ if order not in ('start-first', 'stop-first'):
+ raise errors.InvalidArgument(
+ 'order must be either `start-first` or `stop-first`'
+ )
+ self['Order'] = order
+
class RestartConditionTypesEnum(object):
_values = (
@@ -159,6 +423,21 @@ class RestartConditionTypesEnum(object):
class RestartPolicy(dict):
+ """
+ Used when creating a :py:class:`~docker.types.ContainerSpec`,
+ dictates whether a container should restart after stopping or failing.
+
+ Args:
+
+ condition (string): Condition for restart (``none``, ``on-failure``,
+ or ``any``). Default: ``none``.
+ delay (int): Delay between restart attempts. Default: 0
+ max_attempts (int): Maximum attempts to restart a given container
+ before giving up. Default value is 0, which is ignored.
+ window (int): Time window used to evaluate the restart policy. Default
+ value is 0, which is unbounded.
+ """
+
condition_types = RestartConditionTypesEnum
def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
@@ -175,7 +454,262 @@ class RestartPolicy(dict):
class DriverConfig(dict):
+ """
+ Indicates which driver to use, as well as its configuration. Can be used
+ as ``log_driver`` in a :py:class:`~docker.types.TaskTemplate`,
+ for the ``driver_config`` in a volume :py:class:`~docker.types.Mount`, or
+ as the driver object in
+ :py:meth:`create_secret`.
+
+ Args:
+
+ name (string): Name of the driver to use.
+ options (dict): Driver-specific options. Default: ``None``.
+ """
def __init__(self, name, options=None):
self['Name'] = name
if options:
self['Options'] = options
+
+
+class EndpointSpec(dict):
+ """
+ Describes properties to access and load-balance a service.
+
+ Args:
+
+ mode (string): The mode of resolution to use for internal load
+ balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
+ ``'vip'`` if not provided.
+ ports (dict): Exposed ports that this service is accessible on from the
+ outside, in the form of ``{ published_port: target_port }`` or
+ ``{ published_port: <port_config_tuple> }``. Port config tuple format
+ is ``(target_port [, protocol [, publish_mode]])``.
+ Ports can only be provided if the ``vip`` resolution mode is used.
+ """
+ def __init__(self, mode=None, ports=None):
+ if ports:
+ self['Ports'] = convert_service_ports(ports)
+ if mode:
+ self['Mode'] = mode
+
+
+def convert_service_ports(ports):
+ if isinstance(ports, list):
+ return ports
+ if not isinstance(ports, dict):
+ raise TypeError(
+ 'Invalid type for ports, expected dict or list'
+ )
+
+ result = []
+ for k, v in six.iteritems(ports):
+ port_spec = {
+ 'Protocol': 'tcp',
+ 'PublishedPort': k
+ }
+
+ if isinstance(v, tuple):
+ port_spec['TargetPort'] = v[0]
+ if len(v) >= 2 and v[1] is not None:
+ port_spec['Protocol'] = v[1]
+ if len(v) == 3:
+ port_spec['PublishMode'] = v[2]
+ if len(v) > 3:
+ raise ValueError(
+ 'Service port configuration can have at most 3 elements: '
+ '(target_port, protocol, mode)'
+ )
+ else:
+ port_spec['TargetPort'] = v
+
+ result.append(port_spec)
+ return result
+
+
+class ServiceMode(dict):
+ """
+ Indicate whether a service should be deployed as a replicated or global
+ service, and its associated parameters.
+
+ Args:
+ mode (string): Can be either ``replicated`` or ``global``
+ replicas (int): Number of replicas. For replicated services only.
+ """
+ def __init__(self, mode, replicas=None):
+ if mode not in ('replicated', 'global'):
+ raise errors.InvalidArgument(
+ 'mode must be either "replicated" or "global"'
+ )
+ if mode != 'replicated' and replicas is not None:
+ raise errors.InvalidArgument(
+ 'replicas can only be used for replicated mode'
+ )
+ self[mode] = {}
+ if replicas is not None:
+ self[mode]['Replicas'] = replicas
+
+ @property
+ def mode(self):
+ if 'global' in self:
+ return 'global'
+ return 'replicated'
+
+ @property
+ def replicas(self):
+ if self.mode != 'replicated':
+ return None
+ return self['replicated'].get('Replicas')
+
+
+class SecretReference(dict):
+ """
+ Secret reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a secret is made accessible inside the service's
+ containers.
+
+ Args:
+ secret_id (string): Secret's ID
+ secret_name (string): Secret's name as defined at its creation.
+ filename (string): Name of the file containing the secret. Defaults
+ to the secret's name if not specified.
+ uid (string): UID of the secret file's owner. Default: 0
+ gid (string): GID of the secret file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('secret_id')
+ def __init__(self, secret_id, secret_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['SecretName'] = secret_name
+ self['SecretID'] = secret_id
+ self['File'] = {
+ 'Name': filename or secret_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
+class ConfigReference(dict):
+ """
+ Config reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a config is made accessible inside the service's
+ containers.
+
+ Args:
+ config_id (string): Config's ID
+ config_name (string): Config's name as defined at its creation.
+ filename (string): Name of the file containing the config. Defaults
+ to the config's name if not specified.
+ uid (string): UID of the config file's owner. Default: 0
+ gid (string): GID of the config file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('config_id')
+ def __init__(self, config_id, config_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['ConfigName'] = config_name
+ self['ConfigID'] = config_id
+ self['File'] = {
+ 'Name': filename or config_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
+class Placement(dict):
+ """
+ Placement constraints to be used as part of a :py:class:`TaskTemplate`
+
+ Args:
+ constraints (:py:class:`list`): A list of constraints
+ preferences (:py:class:`list`): Preferences provide a way to make
+ the scheduler aware of factors such as topology. They are
+ provided in order from highest to lowest precedence.
+ platforms (:py:class:`list`): A list of platforms expressed as
+ ``(arch, os)`` tuples
+ """
+ def __init__(self, constraints=None, preferences=None, platforms=None):
+ if constraints is not None:
+ self['Constraints'] = constraints
+ if preferences is not None:
+ self['Preferences'] = preferences
+ if platforms:
+ self['Platforms'] = []
+ for plat in platforms:
+ self['Platforms'].append({
+ 'Architecture': plat[0], 'OS': plat[1]
+ })
+
+
+class DNSConfig(dict):
+ """
+ Specification for DNS related configurations in resolver configuration
+ file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ nameservers (:py:class:`list`): The IP addresses of the name
+ servers.
+ search (:py:class:`list`): A search list for host-name lookup.
+ options (:py:class:`list`): A list of internal resolver variables
+ to be modified (e.g., ``debug``, ``ndots:3``, etc.).
+ """
+ def __init__(self, nameservers=None, search=None, options=None):
+ self['Nameservers'] = nameservers
+ self['Search'] = search
+ self['Options'] = options
+
+
+class Privileges(dict):
+ """
+ Security options for a service's containers.
+ Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ credentialspec_file (str): Load credential spec from this file.
+ The file is read by the daemon, and must be present in the
+ CredentialSpecs subdirectory in the docker data directory,
+ which defaults to ``C:\ProgramData\Docker\`` on Windows.
+ Can not be combined with credentialspec_registry.
+
+ credentialspec_registry (str): Load credential spec from this value
+ in the Windows registry. The specified registry value must be
+ located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion
+ \Virtualization\Containers\CredentialSpecs``.
+ Can not be combined with credentialspec_file.
+
+ selinux_disable (boolean): Disable SELinux
+ selinux_user (string): SELinux user label
+ selinux_role (string): SELinux role label
+ selinux_type (string): SELinux type label
+ selinux_level (string): SELinux level label
+ """
+ def __init__(self, credentialspec_file=None, credentialspec_registry=None,
+ selinux_disable=None, selinux_user=None, selinux_role=None,
+ selinux_type=None, selinux_level=None):
+ credential_spec = {}
+ if credentialspec_registry is not None:
+ credential_spec['Registry'] = credentialspec_registry
+ if credentialspec_file is not None:
+ credential_spec['File'] = credentialspec_file
+
+ if len(credential_spec) > 1:
+ raise errors.InvalidArgument(
+ 'credentialspec_file and credentialspec_registry are mutually'
+ ' exclusive'
+ )
+
+ selinux_context = {
+ 'Disable': selinux_disable,
+ 'User': selinux_user,
+ 'Role': selinux_role,
+ 'Type': selinux_type,
+ 'Level': selinux_level,
+ }
+
+ if len(credential_spec) > 0:
+ self['CredentialSpec'] = credential_spec
+
+ if len(selinux_context) > 0:
+ self['SELinuxContext'] = selinux_context
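
Putting the service types together; a sketch of a replicated service, assuming client is an APIClient connected to a swarm manager:

from docker.types import (
    ContainerSpec, EndpointSpec, ServiceMode, TaskTemplate, UpdateConfig
)

container_spec = ContainerSpec(image='nginx:alpine', tty=True)
task_template = TaskTemplate(container_spec=container_spec)

client.create_service(
    task_template,
    name='web',
    mode=ServiceMode('replicated', replicas=3),
    update_config=UpdateConfig(parallelism=1, order='start-first'),
    endpoint_spec=EndpointSpec(ports={8080: 80}),  # published -> target
)
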
diff --git a/docker/types/swarm.py b/docker/types/swarm.py
index 865fde6..9687a82 100644
--- a/docker/types/swarm.py
+++ b/docker/types/swarm.py
@@ -1,15 +1,30 @@
+from ..errors import InvalidVersion
+from ..utils import version_lt
+
+
class SwarmSpec(dict):
- def __init__(self, task_history_retention_limit=None,
+ """
+ Describe a Swarm's configuration and options. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
+ to instantiate.
+ """
+ def __init__(self, version, task_history_retention_limit=None,
snapshot_interval=None, keep_old_snapshots=None,
log_entries_for_slow_followers=None, heartbeat_tick=None,
election_tick=None, dispatcher_heartbeat_period=None,
- node_cert_expiry=None, external_ca=None, name=None):
+ node_cert_expiry=None, external_cas=None, name=None,
+ labels=None, signing_ca_cert=None, signing_ca_key=None,
+ ca_force_rotate=None, autolock_managers=None,
+ log_driver=None):
if task_history_retention_limit is not None:
self['Orchestration'] = {
'TaskHistoryRetentionLimit': task_history_retention_limit
}
- if any([snapshot_interval, keep_old_snapshots,
- log_entries_for_slow_followers, heartbeat_tick, election_tick]):
+ if any([snapshot_interval,
+ keep_old_snapshots,
+ log_entries_for_slow_followers,
+ heartbeat_tick,
+ election_tick]):
self['Raft'] = {
'SnapshotInterval': snapshot_interval,
'KeepOldSnapshots': keep_old_snapshots,
@@ -23,18 +38,82 @@ class SwarmSpec(dict):
'HeartbeatPeriod': dispatcher_heartbeat_period
}
- if node_cert_expiry or external_ca:
- self['CAConfig'] = {
- 'NodeCertExpiry': node_cert_expiry,
- 'ExternalCA': external_ca
- }
+ ca_config = {}
+ if node_cert_expiry is not None:
+ ca_config['NodeCertExpiry'] = node_cert_expiry
+ if external_cas:
+ if version_lt(version, '1.25'):
+ if len(external_cas) > 1:
+ raise InvalidVersion(
+ 'Support for multiple external CAs is not available '
+ 'for API version < 1.25'
+ )
+ ca_config['ExternalCA'] = external_cas[0]
+ else:
+ ca_config['ExternalCAs'] = external_cas
+ if signing_ca_key:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_key is not supported in API version < 1.30'
+ )
+ ca_config['SigningCAKey'] = signing_ca_key
+ if signing_ca_cert:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_cert is not supported in API version < 1.30'
+ )
+ ca_config['SigningCACert'] = signing_ca_cert
+ if ca_force_rotate is not None:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'force_rotate is not supported in API version < 1.30'
+ )
+ ca_config['ForceRotate'] = ca_force_rotate
+ if ca_config:
+ self['CAConfig'] = ca_config
+
+ if autolock_managers is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'autolock_managers is not supported in API version < 1.25'
+ )
+
+ self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers}
+
+ if log_driver is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'log_driver is not supported in API version < 1.25'
+ )
+
+ self['TaskDefaults'] = {'LogDriver': log_driver}
if name is not None:
self['Name'] = name
+ if labels is not None:
+ self['Labels'] = labels
class SwarmExternalCA(dict):
- def __init__(self, url, protocol=None, options=None):
+ """
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+
+ Args:
+ url (string): URL where certificate signing requests should be
+ sent.
+ protocol (string): Protocol for communication with the external CA.
+ options (dict): An object with key/value pairs that are interpreted
+ as protocol-specific options for the external CA driver.
+ ca_cert (string): The root CA certificate (in PEM format) this
+ external CA uses to issue TLS certificates (assumed to be
+ the current swarm root CA certificate if not provided).
+ """
+ def __init__(self, url, protocol=None, options=None, ca_cert=None):
self['URL'] = url
self['Protocol'] = protocol
self['Options'] = options
+ self['CACert'] = ca_cert
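
Because SwarmSpec now requires the negotiated API version, it is easiest to build through the client; a sketch with illustrative raft settings:

spec = client.create_swarm_spec(
    snapshot_interval=5000,
    log_entries_for_slow_followers=1200,
)
client.init_swarm(
    advertise_addr='eth0',
    listen_addr='0.0.0.0:5000',
    force_new_cluster=False,
    swarm_spec=spec,
)
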
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 4bb3876..81c8186 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,13 +1,13 @@
# flake8: noqa
+from .build import create_archive, exclude_paths, mkbuildcontext, tar
+from .decorators import check_resource, minimum_version, update_headers
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
- mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
+ parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, datetime_to_timestamp,
- create_host_config, create_container_config, parse_bytes, ping_registry,
- parse_env_file, version_lt, version_gte, decode_json_header, split_command,
- create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
+ create_host_config, parse_bytes, parse_env_file, version_lt,
+ version_gte, decode_json_header, split_command, create_ipam_config,
+ create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
+ format_environment, format_extra_hosts
)
-from ..types import LogConfig, Ulimit
-from ..types import SwarmExternalCA, SwarmSpec
-from .decorators import check_resource, minimum_version, update_headers
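
The relocated build helpers can also be used on their own; a sketch with placeholder paths and patterns:

from docker.utils.build import exclude_paths, tar

# Paths (relative to the context root) that survive the ignore patterns.
included = exclude_paths('/path/to/context', ['*.pyc', '!keep.pyc'])

# A file-like object containing the tarred build context.
archive = tar('/path/to/context', exclude=['*.pyc'])
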
diff --git a/docker/utils/build.py b/docker/utils/build.py
new file mode 100644
index 0000000..b644c9f
--- /dev/null
+++ b/docker/utils/build.py
@@ -0,0 +1,219 @@
+import io
+import os
+import re
+import six
+import tarfile
+import tempfile
+
+from ..constants import IS_WINDOWS_PLATFORM
+from fnmatch import fnmatch
+from itertools import chain
+
+
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+ dockerfile = dockerfile or (None, None)
+ extra_files = []
+ if dockerfile[1] is not None:
+ dockerignore_contents = '\n'.join(
+ (exclude or ['.dockerignore']) + [dockerfile[0]]
+ )
+ extra_files = [
+ ('.dockerignore', dockerignore_contents),
+ dockerfile,
+ ]
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
+ root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+ an iterator of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+ def normalize(p):
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ patterns = (
+ (True, normalize(p[1:]))
+ if p.startswith('!') else
+ (False, normalize(p))
+ for p in patterns)
+ patterns = list(reversed(list(chain(
+ # Exclude empty patterns such as "." or the empty string.
+ filter(lambda p: p[1], patterns),
+ # Always include the Dockerfile and .dockerignore
+ [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
+ return set(walk(root, patterns))
+
+
+def walk(root, patterns, default=True):
+ """
+ A collection of files lying below root that should be included according
+ to the patterns.
+ """
+
+ def match(p):
+ if p[1][0] == '**':
+ rec = (p[0], p[1][1:])
+ return [p] + (match(rec) if rec[1] else [rec])
+ elif fnmatch(f, p[1][0]):
+ return [(p[0], p[1][1:])]
+ else:
+ return []
+
+ for f in os.listdir(root):
+ cur = os.path.join(root, f)
+ # The patterns to apply if recursing into that directory.
+ sub = list(chain(*(match(p) for p in patterns)))
+ # Whether this file is explicitly included / excluded.
+ hit = next((p[0] for p in sub if not p[1]), None)
+ # Whether this file is implicitly included / excluded.
+ matched = default if hit is None else hit
+ sub = list(filter(lambda p: p[1], sub))
+ if os.path.isdir(cur) and not os.path.islink(cur):
+ # Entirely skip directories if there is no chance any subfile will
+ # be included.
+ if all(not p[0] for p in sub) and not matched:
+ continue
+ # I think this would greatly speed up dockerignore handling by not
+ # recursing into directories we are sure would be entirely
+ # included, and only yielding the directory itself, which will be
+ # recursively archived anyway. However, the current unit tests expect
+ # the full list of subfiles and I'm not 100% sure it would make no
+ # difference yet.
+ # if all(p[0] for p in sub) and matched:
+ # yield f
+ # continue
+ children = False
+ for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
+ yield r
+ children = True
+ # The current unit tests expect directories only under those
+ # conditions. It might be simplifiable though.
+ if (not sub or not children) and hit or hit is None and default:
+ yield f
+ elif matched:
+ yield f
+
+
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False,
+ extra_files=None):
+ extra_files = extra_files or []
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ extra_names = set(e[0] for e in extra_files)
+ for path in files:
+ if path in extra_names:
+ # Extra files override context files with the same name
+ continue
+ full_path = os.path.join(root, path)
+
+ i = t.gettarinfo(full_path, arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
+
+ # Workaround https://bugs.python.org/issue32713
+ if i.mtime < 0 or i.mtime > 8**11 - 1:
+ i.mtime = int(i.mtime)
+
+ if IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+
+ if i.isfile():
+ try:
+ with open(full_path, 'rb') as f:
+ t.addfile(i, f)
+ except IOError:
+ raise IOError(
+ 'Can not read file in context: {}'.format(full_path)
+ )
+ else:
+ # Directories, FIFOs, symlinks... don't need to be read.
+ t.addfile(i, None)
+
+ for name, contents in extra_files:
+ info = tarfile.TarInfo(name)
+ info.size = len(contents)
+ t.addfile(info, io.BytesIO(contents.encode('utf-8')))
+
+ t.close()
+ fileobj.seek(0)
+ return fileobj
+
+
+def mkbuildcontext(dockerfile):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+ if isinstance(dockerfile, io.StringIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ if six.PY3:
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
+ else:
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ elif isinstance(dockerfile, io.BytesIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ else:
+ dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+ t.addfile(dfinfo, dockerfile)
+ t.close()
+ f.seek(0)
+ return f
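
tar() now delegates to exclude_paths() and create_archive() in this module; a short sketch of exercising them directly (the ./app directory and the patterns are hypothetical):

    import tarfile

    from docker.utils.build import exclude_paths, tar

    # Relative paths under ./app that survive the ignore patterns.
    included = exclude_paths('./app', ['*.pyc', '!keep.pyc'])

    # Build the context archive the same way the build API does.
    archive = tar('./app', exclude=['*.pyc'])
    with tarfile.open(fileobj=archive) as t:
        print(t.getnames())
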
diff --git a/docker/utils/config.py b/docker/utils/config.py
new file mode 100644
index 0000000..82a0e2a
--- /dev/null
+++ b/docker/utils/config.py
@@ -0,0 +1,66 @@
+import json
+import logging
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+
+DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
+LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+log = logging.getLogger(__name__)
+
+
+def find_config_file(config_path=None):
+ paths = list(filter(None, [
+ config_path, # 1
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
+
+ log.debug("Trying paths: {0}".format(repr(paths)))
+
+ for path in paths:
+ if os.path.exists(path):
+ log.debug("Found file at path: {0}".format(path))
+ return path
+
+ log.debug("No config file found")
+
+ return None
+
+
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+ """
+ Get the user's home directory, using the same logic as the Docker Engine
+ client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
+def load_general_config(config_path=None):
+ config_file = find_config_file(config_path)
+
+ if not config_file:
+ return {}
+
+ try:
+ with open(config_file) as f:
+ return json.load(f)
+ except (IOError, ValueError) as e:
+ # In the case of a legacy `.dockercfg` file, we won't
+ # be able to load any JSON data.
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
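
The lookup order is: explicit path, then $DOCKER_CONFIG, then ~/.docker/config.json, then the legacy ~/.dockercfg. A quick sketch (the directory name is hypothetical):

    import os

    from docker.utils.config import find_config_file, load_general_config

    os.environ['DOCKER_CONFIG'] = '/tmp/dockerconf'  # hypothetical directory
    print(find_config_file())     # /tmp/dockerconf/config.json, if it exists
    print(load_general_config())  # parsed JSON dict, or {} on any failure
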
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 2fe880c..c975d4b 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -4,22 +4,21 @@ from .. import errors
from . import utils
-def check_resource(f):
- @functools.wraps(f)
- def wrapped(self, resource_id=None, *args, **kwargs):
- if resource_id is None:
- if kwargs.get('container'):
- resource_id = kwargs.pop('container')
- elif kwargs.get('image'):
- resource_id = kwargs.pop('image')
- if isinstance(resource_id, dict):
- resource_id = resource_id.get('Id', resource_id.get('ID'))
- if not resource_id:
- raise errors.NullResource(
- 'image or container param is undefined'
- )
- return f(self, resource_id, *args, **kwargs)
- return wrapped
+def check_resource(resource_name):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None and kwargs.get(resource_name):
+ resource_id = kwargs.pop(resource_name)
+ if isinstance(resource_id, dict):
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
+ if not resource_id:
+ raise errors.NullResource(
+ 'Resource ID was not provided'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
+ return decorator
def minimum_version(version):
@@ -39,10 +38,10 @@ def minimum_version(version):
def update_headers(f):
def inner(self, *args, **kwargs):
- if 'HttpHeaders' in self._auth_configs:
+ if 'HttpHeaders' in self._general_configs:
if not kwargs.get('headers'):
- kwargs['headers'] = self._auth_configs['HttpHeaders']
+ kwargs['headers'] = self._general_configs['HttpHeaders']
else:
- kwargs['headers'].update(self._auth_configs['HttpHeaders'])
+ kwargs['headers'].update(self._general_configs['HttpHeaders'])
return f(self, *args, **kwargs)
return inner
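
Under the new signature the decorator names the keyword argument to inspect instead of hard-coding 'container' and 'image'. A minimal sketch of the behaviour (FakeAPI is a stand-in, not library code):

    from docker.utils.decorators import check_resource

    class FakeAPI(object):
        @check_resource('container')
        def inspect(self, container):
            return container

    api = FakeAPI()
    print(api.inspect(container={'Id': 'abc123'}))  # -> 'abc123'
    print(api.inspect({'ID': 'abc123'}))            # dicts unwrap either way
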
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
new file mode 100644
index 0000000..42461dd
--- /dev/null
+++ b/docker/utils/fnmatch.py
@@ -0,0 +1,114 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case into account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized
+ if the operating system requires it.
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ name = name.lower()
+ pat = pat.lower()
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+ i, n = 0, len(pat)
+ res = '^'
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ # is some flavor of "**"
+ i = i + 1
+ # Treat **/ as ** so eat the "/"
+ if i < n and pat[i] == '/':
+ i = i + 1
+ if i >= n:
+ # is "**EOF" - to align with .gitignore just accept all
+ res = res + '.*'
+ else:
+ # is "**"
+ # Note that this allows for any # of /'s (even 0) because
+ # the .* will eat everything, even /'s
+ res = res + '(.*/)?'
+ else:
+ # is "*" so map it to anything but "/"
+ res = res + '[^/]*'
+ elif c == '?':
+ # "?" is any char except "/"
+ res = res + '[^/]'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ else:
+ res = res + re.escape(c)
+ return res + '$'
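
The '**' handling is what sets this module apart from the stdlib fnmatch: '*' stops at path separators while '**' may span any number of them. For illustration:

    from docker.utils.fnmatch import fnmatch, translate

    print(translate('**/*.py'))            # ^(.*/)?[^/]*\.py$
    print(fnmatch('a/b/c.py', '**/*.py'))  # True: '**' crosses directories
    print(fnmatch('a/b/c.py', '*.py'))     # False: '*' never matches '/'
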
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
new file mode 100644
index 0000000..addffdf
--- /dev/null
+++ b/docker/utils/json_stream.py
@@ -0,0 +1,80 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import json.decoder
+
+import six
+
+from ..errors import StreamParseError
+
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """
+ Given a stream of bytes or text, if any of the items in the stream
+ are bytes convert them to text.
+ This function can be removed once we return text streams
+ instead of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, six.text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(six.text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = six.text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e)
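
The net effect is that JSON objects split across arbitrary chunk boundaries are reassembled before being decoded. A small sketch:

    from docker.utils.json_stream import json_stream

    # Chunks split mid-object, the way a streamed HTTP response may arrive.
    chunks = ['{"status": "pul', 'ling"}\n{"status":', ' "done"}']
    for obj in json_stream(iter(chunks)):
        print(obj)
    # {'status': 'pulling'}
    # {'status': 'done'}
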
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
new file mode 100644
index 0000000..bf7d697
--- /dev/null
+++ b/docker/utils/ports.py
@@ -0,0 +1,83 @@
+import re
+
+PORT_SPEC = re.compile(
+ "^" # Match full string
+ "(" # External part
+ "((?P<host>[a-fA-F\d.:]+):)?" # Address
+ "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ ")?"
+ "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp))?" # Protocol
+ "$" # Match full string
+)
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def _raise_invalid_port(port):
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]' % port)
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+ if not start:
+ return start
+ if not end:
+ return [start + proto]
+ if randomly_available_port:
+ return ['{}-{}'.format(start, end) + proto]
+ return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
+ port = str(port)
+ match = PORT_SPEC.match(port)
+ if match is None:
+ _raise_invalid_port(port)
+ parts = match.groupdict()
+
+ host = parts['host']
+ proto = parts['proto'] or ''
+ internal = port_range(parts['int'], parts['int_end'], proto)
+ external = port_range(
+ parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+ if host is None:
+ if external is not None and len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, external
+ else:
+ if not external:
+ external = [None] * len(internal)
+ elif len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, [(host, ext_port) for ext_port in external]
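
The consolidated module keeps the old public behaviour while the PORT_SPEC regex does the parsing in a single pass. Some illustrative inputs:

    from docker.utils.ports import build_port_bindings, split_port

    print(split_port('8080:80/tcp'))
    # (['80/tcp'], ['8080'])
    print(split_port('127.0.0.1:8000-8001:9000-9001'))
    # (['9000', '9001'], [('127.0.0.1', '8000'), ('127.0.0.1', '8001')])
    print(build_port_bindings(['8080:80', '8081:80']))
    # {'80': ['8080', '8081']}
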
diff --git a/docker/utils/ports/__init__.py b/docker/utils/ports/__init__.py
deleted file mode 100644
index 1dbfa3a..0000000
--- a/docker/utils/ports/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .ports import (
- split_port,
- build_port_bindings
-) # flake8: noqa
diff --git a/docker/utils/ports/ports.py b/docker/utils/ports/ports.py
deleted file mode 100644
index 326ef94..0000000
--- a/docker/utils/ports/ports.py
+++ /dev/null
@@ -1,92 +0,0 @@
-
-def add_port_mapping(port_bindings, internal_port, external):
- if internal_port in port_bindings:
- port_bindings[internal_port].append(external)
- else:
- port_bindings[internal_port] = [external]
-
-
-def add_port(port_bindings, internal_port_range, external_range):
- if external_range is None:
- for internal_port in internal_port_range:
- add_port_mapping(port_bindings, internal_port, None)
- else:
- ports = zip(internal_port_range, external_range)
- for internal_port, external_port in ports:
- add_port_mapping(port_bindings, internal_port, external_port)
-
-
-def build_port_bindings(ports):
- port_bindings = {}
- for port in ports:
- internal_port_range, external_range = split_port(port)
- add_port(port_bindings, internal_port_range, external_range)
- return port_bindings
-
-
-def to_port_range(port):
- if not port:
- return None
-
- protocol = ""
- if "/" in port:
- parts = port.split("/")
- if len(parts) != 2:
- _raise_invalid_port(port)
-
- port, protocol = parts
- protocol = "/" + protocol
-
- parts = str(port).split('-')
-
- if len(parts) == 1:
- return ["%s%s" % (port, protocol)]
-
- if len(parts) == 2:
- full_port_range = range(int(parts[0]), int(parts[1]) + 1)
- return ["%s%s" % (p, protocol) for p in full_port_range]
-
- raise ValueError('Invalid port range "%s", should be '
- 'port or startport-endport' % port)
-
-
-def _raise_invalid_port(port):
- raise ValueError('Invalid port "%s", should be '
- '[[remote_ip:]remote_port[-remote_port]:]'
- 'port[/protocol]' % port)
-
-
-def split_port(port):
- parts = str(port).split(':')
-
- if not 1 <= len(parts) <= 3:
- _raise_invalid_port(port)
-
- if len(parts) == 1:
- internal_port, = parts
- return to_port_range(internal_port), None
- if len(parts) == 2:
- external_port, internal_port = parts
-
- internal_range = to_port_range(internal_port)
- external_range = to_port_range(external_port)
-
- if internal_range is None or external_range is None:
- _raise_invalid_port(port)
-
- if len(internal_range) != len(external_range):
- raise ValueError('Port ranges don\'t match in length')
-
- return internal_range, external_range
-
- external_ip, external_port, internal_port = parts
- internal_range = to_port_range(internal_port)
- external_range = to_port_range(external_port)
- if not external_range:
- external_range = [None] * len(internal_range)
-
- if len(internal_range) != len(external_range):
- raise ValueError('Port ranges don\'t match in length')
-
- return internal_range, [(external_ip, ex_port or None)
- for ex_port in external_range]
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 164b845..0945f0a 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -22,8 +22,7 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
- # wait for data to become available
- if not isinstance(socket, NpipeSocket):
+ if six.PY3 and not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
@@ -59,7 +58,7 @@ def next_frame_size(socket):
try:
data = read_exactly(socket, 8)
except SocketError:
- return 0
+ return -1
_, actual = struct.unpack('>BxxxL', data)
return actual
@@ -69,7 +68,30 @@ def frames_iter(socket):
"""
Returns a generator of frames read from socket
"""
- n = next_frame_size(socket)
- while n > 0:
- yield read(socket, n)
+ while True:
n = next_frame_size(socket)
+ if n < 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ if result is None:
+ continue
+ data_length = len(result)
+ if data_length == 0:
+ # We have reached EOF
+ return
+ n -= data_length
+ yield result
+
+
+def socket_raw_iter(socket):
+ """
+ Returns a generator of data read from the socket.
+ This is used for non-multiplexed streams.
+ """
+ while True:
+ result = read(socket)
+ if len(result) == 0:
+ # We have reached EOF
+ return
+ yield result
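
Each multiplexed frame starts with an 8-byte header: one stream byte, three padding bytes and a big-endian payload length, matching the '>BxxxL' unpack above. A frame can be synthesized for testing like so (the socket plumbing itself is assumed):

    import struct

    payload = b'hello\n'
    # Stream 1 is stdout; the header layout mirrors next_frame_size().
    frame = struct.pack('>BxxxL', 1, len(payload)) + payload
    stream_id, length = struct.unpack('>BxxxL', frame[:8])
    assert (stream_id, length) == (1, len(payload))
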
diff --git a/docker/utils/types.py b/docker/utils/types.py
deleted file mode 100644
index 8098c47..0000000
--- a/docker/utils/types.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Compatibility module. See https://github.com/docker/docker-py/issues/1196
-
-import warnings
-
-from ..types import Ulimit, LogConfig # flake8: noqa
-
-warnings.warn('docker.utils.types is now docker.types', ImportWarning)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 8d55b57..fe3b9a5 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,23 +1,15 @@
import base64
-import io
import os
import os.path
import json
import shlex
-import tarfile
-import tempfile
-import warnings
from distutils.version import StrictVersion
from datetime import datetime
-from fnmatch import fnmatch
-import requests
import six
-from .. import constants
from .. import errors
from .. import tls
-from ..types import Ulimit, LogConfig
if six.PY2:
from urllib import splitnport
@@ -36,44 +28,18 @@ BYTE_UNITS = {
}
-def create_ipam_pool(subnet=None, iprange=None, gateway=None,
- aux_addresses=None):
- return {
- 'Subnet': subnet,
- 'IPRange': iprange,
- 'Gateway': gateway,
- 'AuxiliaryAddresses': aux_addresses
- }
-
-
-def create_ipam_config(driver='default', pool_configs=None):
- return {
- 'Driver': driver,
- 'Config': pool_configs or []
- }
+def create_ipam_pool(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_pool has been removed. Please use a '
+ 'docker.types.IPAMPool object instead.'
+ )
-def mkbuildcontext(dockerfile):
- f = tempfile.NamedTemporaryFile()
- t = tarfile.open(mode='w', fileobj=f)
- if isinstance(dockerfile, io.StringIO):
- dfinfo = tarfile.TarInfo('Dockerfile')
- if six.PY3:
- raise TypeError('Please use io.BytesIO to create in-memory '
- 'Dockerfiles with Python 3')
- else:
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
- elif isinstance(dockerfile, io.BytesIO):
- dfinfo = tarfile.TarInfo('Dockerfile')
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
- else:
- dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
- t.addfile(dfinfo, dockerfile)
- t.close()
- f.seek(0)
- return f
+def create_ipam_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_config has been removed. Please use a '
+ 'docker.types.IPAMConfig object instead.'
+ )
def decode_json_header(header):
@@ -83,122 +49,6 @@ def decode_json_header(header):
return json.loads(data)
-def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
- if not fileobj:
- fileobj = tempfile.NamedTemporaryFile()
- t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
-
- root = os.path.abspath(path)
- exclude = exclude or []
-
- for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
- t.add(os.path.join(root, path), arcname=path, recursive=False)
-
- t.close()
- fileobj.seek(0)
- return fileobj
-
-
-def exclude_paths(root, patterns, dockerfile=None):
- """
- Given a root directory path and a list of .dockerignore patterns, return
- an iterator of all paths (both regular files and directories) in the root
- directory that do *not* match any of the patterns.
-
- All paths returned are relative to the root.
- """
- if dockerfile is None:
- dockerfile = 'Dockerfile'
-
- exceptions = [p for p in patterns if p.startswith('!')]
-
- include_patterns = [p[1:] for p in exceptions]
- include_patterns += [dockerfile, '.dockerignore']
-
- exclude_patterns = list(set(patterns) - set(exceptions))
-
- paths = get_paths(root, exclude_patterns, include_patterns,
- has_exceptions=len(exceptions) > 0)
-
- return set(paths).union(
- # If the Dockerfile is in a subdirectory that is excluded, get_paths
- # will not descend into it and the file will be skipped. This ensures
- # it doesn't happen.
- set([dockerfile])
- if os.path.exists(os.path.join(root, dockerfile)) else set()
- )
-
-
-def should_include(path, exclude_patterns, include_patterns):
- """
- Given a path, a list of exclude patterns, and a list of inclusion patterns:
-
- 1. Returns True if the path doesn't match any exclusion pattern
- 2. Returns False if the path matches an exclusion pattern and doesn't match
- an inclusion pattern
- 3. Returns true if the path matches an exclusion pattern and matches an
- inclusion pattern
- """
- for pattern in exclude_patterns:
- if match_path(path, pattern):
- for pattern in include_patterns:
- if match_path(path, pattern):
- return True
- return False
- return True
-
-
-def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
- paths = []
-
- for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
- parent = os.path.relpath(parent, root)
- if parent == '.':
- parent = ''
-
- # If exception rules exist, we can't skip recursing into ignored
- # directories, as we need to look for exceptions in them.
- #
- # It may be possible to optimize this further for exception patterns
- # that *couldn't* match within ignored directores.
- #
- # This matches the current docker logic (as of 2015-11-24):
- # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
-
- if not has_exceptions:
-
- # Remove excluded patterns from the list of directories to traverse
- # by mutating the dirs we're iterating over.
- # This looks strange, but is considered the correct way to skip
- # traversal. See https://docs.python.org/2/library/os.html#os.walk
-
- dirs[:] = [d for d in dirs if
- should_include(os.path.join(parent, d),
- exclude_patterns, include_patterns)]
-
- for path in dirs:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- for path in files:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- return paths
-
-
-def match_path(path, pattern):
- pattern = pattern.rstrip('/')
- if pattern:
- pattern = os.path.relpath(pattern)
-
- pattern_components = pattern.split(os.path.sep)
- path_components = path.split(os.path.sep)[:len(pattern_components)]
- return fnmatch('/'.join(path_components), pattern)
-
-
def compare_version(v1, v2):
"""Compare docker versions
@@ -229,29 +79,6 @@ def version_gte(v1, v2):
return not version_lt(v1, v2)
-def ping_registry(url):
- warnings.warn(
- 'The `ping_registry` method is deprecated and will be removed.',
- DeprecationWarning
- )
-
- return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
-
-
-def ping(url, valid_4xx_statuses=None):
- try:
- res = requests.get(url, timeout=3)
- except Exception:
- return False
- else:
- # We don't send yet auth headers
- # and a v2 registry will respond with status 401
- return (
- res.status_code < 400 or
- (valid_4xx_statuses and res.status_code in valid_4xx_statuses)
- )
-
-
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
@@ -361,6 +188,20 @@ def convert_tmpfs_mounts(tmpfs):
return result
+def convert_service_networks(networks):
+ if not networks:
+ return networks
+ if not isinstance(networks, list):
+ raise TypeError('networks parameter must be a list.')
+
+ result = []
+ for n in networks:
+ if isinstance(n, six.string_types):
+ n = {'Target': n}
+ result.append(n)
+ return result
+
+
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
@@ -576,330 +417,6 @@ def parse_bytes(s):
return s
-def host_config_type_error(param, param_value, expected):
- error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
- return TypeError(error_msg.format(param, expected, type(param_value)))
-
-
-def host_config_version_error(param, version, less_than=True):
- operator = '<' if less_than else '>'
- error_msg = '{0} param is not supported in API versions {1} {2}'
- return errors.InvalidVersion(error_msg.format(param, operator, version))
-
-
-def host_config_value_error(param, param_value):
- error_msg = 'Invalid value for {0} param: {1}'
- return ValueError(error_msg.format(param, param_value))
-
-
-def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=False, links=None, privileged=False,
- dns=None, dns_search=None, volumes_from=None,
- network_mode=None, restart_policy=None, cap_add=None,
- cap_drop=None, devices=None, extra_hosts=None,
- read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None, log_config=None,
- mem_limit=None, memswap_limit=None,
- mem_reservation=None, kernel_memory=None,
- mem_swappiness=None, cgroup_parent=None,
- group_add=None, cpu_quota=None,
- cpu_period=None, blkio_weight=None,
- blkio_weight_device=None, device_read_bps=None,
- device_write_bps=None, device_read_iops=None,
- device_write_iops=None, oom_kill_disable=False,
- shm_size=None, sysctls=None, version=None, tmpfs=None,
- oom_score_adj=None, dns_opt=None, cpu_shares=None,
- cpuset_cpus=None, userns_mode=None, pids_limit=None):
-
- host_config = {}
-
- if not version:
- warnings.warn(
- 'docker.utils.create_host_config() is deprecated. Please use '
- 'Client.create_host_config() instead.'
- )
- version = constants.DEFAULT_DOCKER_API_VERSION
-
- if mem_limit is not None:
- host_config['Memory'] = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- host_config['MemorySwap'] = parse_bytes(memswap_limit)
-
- if mem_reservation:
- if version_lt(version, '1.21'):
- raise host_config_version_error('mem_reservation', '1.21')
-
- host_config['MemoryReservation'] = parse_bytes(mem_reservation)
-
- if kernel_memory:
- if version_lt(version, '1.21'):
- raise host_config_version_error('kernel_memory', '1.21')
-
- host_config['KernelMemory'] = parse_bytes(kernel_memory)
-
- if mem_swappiness is not None:
- if version_lt(version, '1.20'):
- raise host_config_version_error('mem_swappiness', '1.20')
- if not isinstance(mem_swappiness, int):
- raise host_config_type_error(
- 'mem_swappiness', mem_swappiness, 'int'
- )
-
- host_config['MemorySwappiness'] = mem_swappiness
-
- if shm_size is not None:
- if isinstance(shm_size, six.string_types):
- shm_size = parse_bytes(shm_size)
-
- host_config['ShmSize'] = shm_size
-
- if pid_mode not in (None, 'host'):
- raise host_config_value_error('pid_mode', pid_mode)
- elif pid_mode:
- host_config['PidMode'] = pid_mode
-
- if ipc_mode:
- host_config['IpcMode'] = ipc_mode
-
- if privileged:
- host_config['Privileged'] = privileged
-
- if oom_kill_disable:
- if version_lt(version, '1.20'):
- raise host_config_version_error('oom_kill_disable', '1.19')
-
- host_config['OomKillDisable'] = oom_kill_disable
-
- if oom_score_adj:
- if version_lt(version, '1.22'):
- raise host_config_version_error('oom_score_adj', '1.22')
- if not isinstance(oom_score_adj, int):
- raise host_config_type_error(
- 'oom_score_adj', oom_score_adj, 'int'
- )
- host_config['OomScoreAdj'] = oom_score_adj
-
- if publish_all_ports:
- host_config['PublishAllPorts'] = publish_all_ports
-
- if read_only is not None:
- host_config['ReadonlyRootfs'] = read_only
-
- if dns_search:
- host_config['DnsSearch'] = dns_search
-
- if network_mode:
- host_config['NetworkMode'] = network_mode
- elif network_mode is None and compare_version('1.19', version) > 0:
- host_config['NetworkMode'] = 'default'
-
- if restart_policy:
- if not isinstance(restart_policy, dict):
- raise host_config_type_error(
- 'restart_policy', restart_policy, 'dict'
- )
-
- host_config['RestartPolicy'] = restart_policy
-
- if cap_add:
- host_config['CapAdd'] = cap_add
-
- if cap_drop:
- host_config['CapDrop'] = cap_drop
-
- if devices:
- host_config['Devices'] = parse_devices(devices)
-
- if group_add:
- if version_lt(version, '1.20'):
- raise host_config_version_error('group_add', '1.20')
-
- host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
-
- if dns is not None:
- host_config['Dns'] = dns
-
- if dns_opt is not None:
- if version_lt(version, '1.21'):
- raise host_config_version_error('dns_opt', '1.21')
-
- host_config['DnsOptions'] = dns_opt
-
- if security_opt is not None:
- if not isinstance(security_opt, list):
- raise host_config_type_error('security_opt', security_opt, 'list')
-
- host_config['SecurityOpt'] = security_opt
-
- if sysctls:
- if not isinstance(sysctls, dict):
- raise host_config_type_error('sysctls', sysctls, 'dict')
- host_config['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- host_config['Sysctls'][k] = six.text_type(v)
-
- if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
- volumes_from = volumes_from.split(',')
-
- host_config['VolumesFrom'] = volumes_from
-
- if binds is not None:
- host_config['Binds'] = convert_volume_binds(binds)
-
- if port_bindings is not None:
- host_config['PortBindings'] = convert_port_bindings(port_bindings)
-
- if extra_hosts is not None:
- if isinstance(extra_hosts, dict):
- extra_hosts = [
- '{0}:{1}'.format(k, v)
- for k, v in sorted(six.iteritems(extra_hosts))
- ]
-
- host_config['ExtraHosts'] = extra_hosts
-
- if links is not None:
- host_config['Links'] = normalize_links(links)
-
- if isinstance(lxc_conf, dict):
- formatted = []
- for k, v in six.iteritems(lxc_conf):
- formatted.append({'Key': k, 'Value': str(v)})
- lxc_conf = formatted
-
- if lxc_conf is not None:
- host_config['LxcConf'] = lxc_conf
-
- if cgroup_parent is not None:
- host_config['CgroupParent'] = cgroup_parent
-
- if ulimits is not None:
- if not isinstance(ulimits, list):
- raise host_config_type_error('ulimits', ulimits, 'list')
- host_config['Ulimits'] = []
- for l in ulimits:
- if not isinstance(l, Ulimit):
- l = Ulimit(**l)
- host_config['Ulimits'].append(l)
-
- if log_config is not None:
- if not isinstance(log_config, LogConfig):
- if not isinstance(log_config, dict):
- raise host_config_type_error(
- 'log_config', log_config, 'LogConfig'
- )
- log_config = LogConfig(**log_config)
-
- host_config['LogConfig'] = log_config
-
- if cpu_quota:
- if not isinstance(cpu_quota, int):
- raise host_config_type_error('cpu_quota', cpu_quota, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_quota', '1.19')
-
- host_config['CpuQuota'] = cpu_quota
-
- if cpu_period:
- if not isinstance(cpu_period, int):
- raise host_config_type_error('cpu_period', cpu_period, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_period', '1.19')
-
- host_config['CpuPeriod'] = cpu_period
-
- if cpu_shares:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpu_shares', '1.18')
-
- if not isinstance(cpu_shares, int):
- raise host_config_type_error('cpu_shares', cpu_shares, 'int')
-
- host_config['CpuShares'] = cpu_shares
-
- if cpuset_cpus:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpuset_cpus', '1.18')
-
- host_config['CpuSetCpus'] = cpuset_cpus
-
- if blkio_weight:
- if not isinstance(blkio_weight, int):
- raise host_config_type_error('blkio_weight', blkio_weight, 'int')
- if version_lt(version, '1.22'):
- raise host_config_version_error('blkio_weight', '1.22')
- host_config["BlkioWeight"] = blkio_weight
-
- if blkio_weight_device:
- if not isinstance(blkio_weight_device, list):
- raise host_config_type_error(
- 'blkio_weight_device', blkio_weight_device, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('blkio_weight_device', '1.22')
- host_config["BlkioWeightDevice"] = blkio_weight_device
-
- if device_read_bps:
- if not isinstance(device_read_bps, list):
- raise host_config_type_error(
- 'device_read_bps', device_read_bps, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_read_bps', '1.22')
- host_config["BlkioDeviceReadBps"] = device_read_bps
-
- if device_write_bps:
- if not isinstance(device_write_bps, list):
- raise host_config_type_error(
- 'device_write_bps', device_write_bps, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_write_bps', '1.22')
- host_config["BlkioDeviceWriteBps"] = device_write_bps
-
- if device_read_iops:
- if not isinstance(device_read_iops, list):
- raise host_config_type_error(
- 'device_read_iops', device_read_iops, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_read_iops', '1.22')
- host_config["BlkioDeviceReadIOps"] = device_read_iops
-
- if device_write_iops:
- if not isinstance(device_write_iops, list):
- raise host_config_type_error(
- 'device_write_iops', device_write_iops, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_write_iops', '1.22')
- host_config["BlkioDeviceWriteIOps"] = device_write_iops
-
- if tmpfs:
- if version_lt(version, '1.22'):
- raise host_config_version_error('tmpfs', '1.22')
- host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
-
- if userns_mode:
- if version_lt(version, '1.23'):
- raise host_config_version_error('userns_mode', '1.23')
-
- if userns_mode != "host":
- raise host_config_value_error("userns_mode", userns_mode)
- host_config['UsernsMode'] = userns_mode
-
- if pids_limit:
- if not isinstance(pids_limit, int):
- raise host_config_type_error('pids_limit', pids_limit, 'int')
- if version_lt(version, '1.23'):
- raise host_config_version_error('pids_limit', '1.23')
- host_config["PidsLimit"] = pids_limit
-
- return host_config
-
-
def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
@@ -907,50 +424,6 @@ def normalize_links(links):
return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
-def create_networking_config(endpoints_config=None):
- networking_config = {}
-
- if endpoints_config:
- networking_config["EndpointsConfig"] = endpoints_config
-
- return networking_config
-
-
-def create_endpoint_config(version, aliases=None, links=None,
- ipv4_address=None, ipv6_address=None,
- link_local_ips=None):
- if version_lt(version, '1.22'):
- raise errors.InvalidVersion(
- 'Endpoint config is not supported for API version < 1.22'
- )
- endpoint_config = {}
-
- if aliases:
- endpoint_config["Aliases"] = aliases
-
- if links:
- endpoint_config["Links"] = normalize_links(links)
-
- ipam_config = {}
- if ipv4_address:
- ipam_config['IPv4Address'] = ipv4_address
-
- if ipv6_address:
- ipam_config['IPv6Address'] = ipv6_address
-
- if link_local_ips is not None:
- if version_lt(version, '1.24'):
- raise errors.InvalidVersion(
- 'link_local_ips is not supported for API version < 1.24'
- )
- ipam_config['LinkLocalIPs'] = link_local_ips
-
- if ipam_config:
- endpoint_config['IPAMConfig'] = ipam_config
-
- return endpoint_config
-
-
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
@@ -964,7 +437,11 @@ def parse_env_file(env_file):
if line[0] == '#':
continue
- parse_line = line.strip().split('=', 1)
+ line = line.strip()
+ if not line:
+ continue
+
+ parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
@@ -993,147 +470,20 @@ def format_environment(environment):
return [format_env(*var) for var in six.iteritems(environment)]
-def create_container_config(
- version, image, command, hostname=None, user=None, detach=False,
- stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
- dns=None, volumes=None, volumes_from=None, network_disabled=False,
- entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
- labels=None, volume_driver=None, stop_signal=None, networking_config=None,
-):
- if isinstance(command, six.string_types):
- command = split_command(command)
-
- if isinstance(entrypoint, six.string_types):
- entrypoint = split_command(entrypoint)
-
- if isinstance(environment, dict):
- environment = format_environment(environment)
-
- if labels is not None and compare_version('1.18', version) < 0:
- raise errors.InvalidVersion(
- 'labels were only introduced in API version 1.18'
- )
-
- if cpuset is not None or cpu_shares is not None:
- if version_gte(version, '1.18'):
- warnings.warn(
- 'The cpuset_cpus and cpu_shares options have been moved to '
- 'host_config in API version 1.18, and will be removed',
- DeprecationWarning
- )
+def format_extra_hosts(extra_hosts, task=False):
+ # Use format dictated by Swarm API if container is part of a task
+ if task:
+ return [
+ '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ ]
- if stop_signal is not None and compare_version('1.21', version) < 0:
- raise errors.InvalidVersion(
- 'stop_signal was only introduced in API version 1.21'
- )
-
- if compare_version('1.19', version) < 0:
- if volume_driver is not None:
- raise errors.InvalidVersion(
- 'Volume drivers were only introduced in API version 1.19'
- )
- mem_limit = mem_limit if mem_limit is not None else 0
- memswap_limit = memswap_limit if memswap_limit is not None else 0
- else:
- if mem_limit is not None:
- raise errors.InvalidVersion(
- 'mem_limit has been moved to host_config in API version 1.19'
- )
+ return [
+ '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ ]
- if memswap_limit is not None:
- raise errors.InvalidVersion(
- 'memswap_limit has been moved to host_config in API '
- 'version 1.19'
- )
- if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
-
- if mem_limit is not None:
- mem_limit = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- memswap_limit = parse_bytes(memswap_limit)
-
- if isinstance(ports, list):
- exposed_ports = {}
- for port_definition in ports:
- port = port_definition
- proto = 'tcp'
- if isinstance(port_definition, tuple):
- if len(port_definition) == 2:
- proto = port_definition[1]
- port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
- ports = exposed_ports
-
- if isinstance(volumes, six.string_types):
- volumes = [volumes, ]
-
- if isinstance(volumes, list):
- volumes_dict = {}
- for vol in volumes:
- volumes_dict[vol] = {}
- volumes = volumes_dict
-
- if volumes_from:
- if not isinstance(volumes_from, six.string_types):
- volumes_from = ','.join(volumes_from)
- else:
- # Force None, an empty list or dict causes client.start to fail
- volumes_from = None
-
- attach_stdin = False
- attach_stdout = False
- attach_stderr = False
- stdin_once = False
-
- if not detach:
- attach_stdout = True
- attach_stderr = True
-
- if stdin_open:
- attach_stdin = True
- stdin_once = True
-
- if compare_version('1.10', version) >= 0:
- message = ('{0!r} parameter has no effect on create_container().'
- ' It has been moved to host_config')
- if dns is not None:
- raise errors.InvalidVersion(message.format('dns'))
- if volumes_from is not None:
- raise errors.InvalidVersion(message.format('volumes_from'))
-
- return {
- 'Hostname': hostname,
- 'Domainname': domainname,
- 'ExposedPorts': ports,
- 'User': six.text_type(user) if user else None,
- 'Tty': tty,
- 'OpenStdin': stdin_open,
- 'StdinOnce': stdin_once,
- 'Memory': mem_limit,
- 'AttachStdin': attach_stdin,
- 'AttachStdout': attach_stdout,
- 'AttachStderr': attach_stderr,
- 'Env': environment,
- 'Cmd': command,
- 'Dns': dns,
- 'Image': image,
- 'Volumes': volumes,
- 'VolumesFrom': volumes_from,
- 'NetworkDisabled': network_disabled,
- 'Entrypoint': entrypoint,
- 'CpuShares': cpu_shares,
- 'Cpuset': cpuset,
- 'CpusetCpus': cpuset,
- 'WorkingDir': working_dir,
- 'MemorySwap': memswap_limit,
- 'HostConfig': host_config,
- 'NetworkingConfig': networking_config,
- 'MacAddress': mac_address,
- 'Labels': labels,
- 'VolumeDriver': volume_driver,
- 'StopSignal': stop_signal
- }
+def create_host_config(self, *args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_host_config has been removed. Please use a '
+ 'docker.types.HostConfig object instead.'
+ )
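
Each removed helper now raises DeprecatedMethod pointing at its docker.types replacement, so migration is mechanical. A sketch of the IPAM case:

    from docker.types import IPAMConfig, IPAMPool

    pool = IPAMPool(subnet='192.168.52.0/24', gateway='192.168.52.254')
    ipam = IPAMConfig(driver='default', pool_configs=[pool])
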
diff --git a/docker/version.py b/docker/version.py
index 27d014c..28dd1ea 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.10.6"
+version = "3.2.1"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
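
Splitting on '-' before the dots keeps the tuple parse safe for pre-release tags:

    version = "3.2.1"
    version_info = tuple(int(d) for d in version.split("-")[0].split("."))
    assert version_info == (3, 2, 1)
    # "3.2.1-dev" would likewise parse to (3, 2, 1)
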
diff --git a/docker_py.egg-info/PKG-INFO b/docker_py.egg-info/PKG-INFO
deleted file mode 100644
index cc96266..0000000
--- a/docker_py.egg-info/PKG-INFO
+++ /dev/null
@@ -1,61 +0,0 @@
-Metadata-Version: 1.1
-Name: docker-py
-Version: 1.10.6
-Summary: Python client for Docker.
-Home-page: https://github.com/docker/docker-py/
-Author: Joffrey F
-Author-email: joffrey@docker.com
-License: UNKNOWN
-Description: docker-py
- =========
-
- |Build Status|
-
- A Python library for the Docker Remote API. It does everything the
- ``docker`` command does, but from within Python – run containers, manage
- them, pull/push images, etc.
-
- Installation
- ------------
-
- The latest stable version is always available on PyPi.
-
- ::
-
- pip install docker-py
-
- Documentation
- -------------
-
- |Documentation Status|
-
- `Read the full documentation
- here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
- available in the ``docs/`` directory.
-
- License
- -------
-
- Docker is licensed under the Apache License, Version 2.0. See LICENSE
- for full license text
-
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
- :target: https://travis-ci.org/docker/docker-py
- .. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
- :target: https://readthedocs.org/projects/docker-py/?badge=latest
-
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Other Environment
-Classifier: Intended Audience :: Developers
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Topic :: Utilities
-Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker_py.egg-info/SOURCES.txt b/docker_py.egg-info/SOURCES.txt
deleted file mode 100644
index 10cd950..0000000
--- a/docker_py.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-LICENSE
-MANIFEST.in
-README.md
-README.rst
-requirements.txt
-setup.cfg
-setup.py
-test-requirements.txt
-docker/__init__.py
-docker/client.py
-docker/constants.py
-docker/errors.py
-docker/tls.py
-docker/version.py
-docker/api/__init__.py
-docker/api/build.py
-docker/api/container.py
-docker/api/daemon.py
-docker/api/exec_api.py
-docker/api/image.py
-docker/api/network.py
-docker/api/service.py
-docker/api/swarm.py
-docker/api/volume.py
-docker/auth/__init__.py
-docker/auth/auth.py
-docker/ssladapter/__init__.py
-docker/ssladapter/ssladapter.py
-docker/transport/__init__.py
-docker/transport/npipeconn.py
-docker/transport/npipesocket.py
-docker/transport/unixconn.py
-docker/types/__init__.py
-docker/types/base.py
-docker/types/containers.py
-docker/types/services.py
-docker/types/swarm.py
-docker/utils/__init__.py
-docker/utils/decorators.py
-docker/utils/socket.py
-docker/utils/types.py
-docker/utils/utils.py
-docker/utils/ports/__init__.py
-docker/utils/ports/ports.py
-docker_py.egg-info/PKG-INFO
-docker_py.egg-info/SOURCES.txt
-docker_py.egg-info/dependency_links.txt
-docker_py.egg-info/not-zip-safe
-docker_py.egg-info/requires.txt
-docker_py.egg-info/top_level.txt
-tests/__init__.py
-tests/base.py
-tests/helpers.py
-tests/integration/__init__.py
-tests/integration/api_test.py
-tests/integration/build_test.py
-tests/integration/conftest.py
-tests/integration/container_test.py
-tests/integration/exec_test.py
-tests/integration/image_test.py
-tests/integration/network_test.py
-tests/integration/regression_test.py
-tests/integration/service_test.py
-tests/integration/swarm_test.py
-tests/integration/volume_test.py
-tests/unit/__init__.py
-tests/unit/api_test.py
-tests/unit/auth_test.py
-tests/unit/build_test.py
-tests/unit/client_test.py
-tests/unit/container_test.py
-tests/unit/exec_test.py
-tests/unit/fake_api.py
-tests/unit/fake_stat.py
-tests/unit/image_test.py
-tests/unit/network_test.py
-tests/unit/ssladapter_test.py
-tests/unit/utils_test.py
-tests/unit/volume_test.py
-tests/unit/testdata/certs/ca.pem
-tests/unit/testdata/certs/cert.pem
-tests/unit/testdata/certs/key.pem \ No newline at end of file
diff --git a/docker_py.egg-info/requires.txt b/docker_py.egg-info/requires.txt
deleted file mode 100644
index a7ddcd4..0000000
--- a/docker_py.egg-info/requires.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-requests >= 2.5.2, != 2.11.0
-six >= 1.4.0
-websocket-client >= 0.32.0
-docker-pycreds >= 0.2.1
-
-[:python_version < "3.3"]
-ipaddress >= 1.0.16
-
-[:python_version < "3.5"]
-backports.ssl_match_hostname >= 3.5
diff --git a/requirements.txt b/requirements.txt
index 3754131..2b281ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,18 @@
-requests==2.11.1
-six>=1.4.0
-websocket-client==0.32.0
-backports.ssl_match_hostname>=3.5 ; python_version < '3.5'
-ipaddress==1.0.16 ; python_version < '3.3'
-docker-pycreds==0.2.1
+appdirs==1.4.3
+asn1crypto==0.22.0
+backports.ssl-match-hostname==3.5.0.1
+cffi==1.10.0
+cryptography==1.9
+docker-pycreds==0.2.2
+enum34==1.1.6
+idna==2.5
+ipaddress==1.0.18
+packaging==16.8
+pycparser==2.17
+pyOpenSSL==17.0.0
+pyparsing==2.2.0
+pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
+pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
+requests==2.14.2
+six==1.10.0
+websocket-client==0.40.0
diff --git a/setup.cfg b/setup.cfg
index 19cf102..95b126b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,9 +3,9 @@ universal = 1
[metadata]
description_file = README.rst
+license = Apache License 2.0
[egg_info]
tag_build =
tag_date = 0
-tag_svn_revision = 0
diff --git a/setup.py b/setup.py
index 4538d91..271d94f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,26 +1,53 @@
#!/usr/bin/env python
+from __future__ import print_function
+
+import codecs
import os
import sys
-from setuptools import setup
+import pip
+
+from setuptools import setup, find_packages
+if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:
+ print(
+ 'ERROR: "docker-py" needs to be uninstalled before installing this'
+ ' package:\npip uninstall docker-py', file=sys.stderr
+ )
+ sys.exit(1)
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2, != 2.11.0',
+ 'requests >= 2.14.2, != 2.18.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
- 'docker-pycreds >= 0.2.1'
+ 'docker-pycreds >= 0.2.2'
]
-if sys.platform == 'win32':
- requirements.append('pypiwin32 >= 219')
-
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
+ # While not imported explicitly, the ipaddress module is required for
+ # ssl_match_hostname to verify hosts match with certificates via
+ # SubjectAltName: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
+
+ # win32 APIs if on Windows (required for npipe support)
+ # Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
+ # on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
+ ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
+ ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==220',
+
+ # If using docker-py over TLS, we highly recommend this option be
+ # pip-installed or pinned.
+
+ # TODO: if pip installing both "requests" and "requests[security]", the
+ # extra package from the "security" option are not installed (see
+ # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
+ # installing the extra dependencies, install the following instead:
+ # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
+ 'tls': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
}
version = None
@@ -32,7 +59,7 @@ with open('./test-requirements.txt') as test_reqs_txt:
long_description = ''
try:
- with open('./README.rst') as readme_rst:
+ with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
@@ -40,34 +67,30 @@ except IOError:
pass
setup(
- name="docker-py",
+ name="docker",
version=version,
- description="Python client for Docker.",
+ description="A Python library for the Docker Engine API.",
long_description=long_description,
- url='https://github.com/docker/docker-py/',
- packages=[
- 'docker', 'docker.api', 'docker.auth', 'docker.transport',
- 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
- 'docker.types',
- ],
+ url='https://github.com/docker/docker-py',
+ packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
- 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
diff --git a/test-requirements.txt b/test-requirements.txt
index 460db10..09680b6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,5 +1,6 @@
+coverage==3.7.1
+flake8==3.4.1
mock==1.0.1
pytest==2.9.1
-coverage==3.7.1
pytest-cov==2.1.0
-flake8==2.4.1
+pytest-timeout==1.2.1
diff --git a/tests/base.py b/tests/base.py
deleted file mode 100644
index a2c01fc..0000000
--- a/tests/base.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import sys
-import unittest
-
-import pytest
-import six
-
-import docker
-
-
-class BaseTestCase(unittest.TestCase):
- def assertIn(self, object, collection):
- if six.PY2 and sys.version_info[1] <= 6:
- return self.assertTrue(object in collection)
- return super(BaseTestCase, self).assertIn(object, collection)
-
-
-def requires_api_version(version):
- return pytest.mark.skipif(
- docker.utils.version_lt(
- docker.constants.DEFAULT_DOCKER_API_VERSION, version
- ),
- reason="API version is too low (< {0})".format(version)
- )
-
-
-class Cleanup(object):
- if sys.version_info < (2, 7):
- # Provide a basic implementation of addCleanup for Python < 2.7
- def __init__(self, *args, **kwargs):
- super(Cleanup, self).__init__(*args, **kwargs)
- self._cleanups = []
-
- def tearDown(self):
- super(Cleanup, self).tearDown()
- ok = True
- while self._cleanups:
- fn, args, kwargs = self._cleanups.pop(-1)
- try:
- fn(*args, **kwargs)
- except KeyboardInterrupt:
- raise
- except:
- ok = False
- if not ok:
- raise
-
- def addCleanup(self, function, *args, **kwargs):
- self._cleanups.append((function, args, kwargs))
diff --git a/tests/helpers.py b/tests/helpers.py
index 40baef9..b6b493b 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,15 +1,16 @@
+import functools
import os
import os.path
-import shutil
+import random
import tarfile
import tempfile
-import unittest
-
-import docker
+import time
+import re
import six
+import socket
-BUSYBOX = 'busybox:buildroot-2014.02'
-EXEC_DRIVER = []
+import docker
+import pytest
def make_tree(dirs, files):
@@ -45,86 +46,88 @@ def untar_file(tardata, filename):
return result
-def docker_client(**kwargs):
- return docker.Client(**docker_client_kwargs(**kwargs))
-
-
-def docker_client_kwargs(**kwargs):
- client_kwargs = docker.utils.kwargs_from_env(assert_hostname=False)
- client_kwargs.update(kwargs)
- return client_kwargs
-
-
-class BaseTestCase(unittest.TestCase):
- tmp_imgs = []
- tmp_containers = []
- tmp_folders = []
- tmp_volumes = []
-
- def setUp(self):
- if six.PY2:
- self.assertRegex = self.assertRegexpMatches
- self.assertCountEqual = self.assertItemsEqual
- self.client = docker_client(timeout=60)
- self.tmp_imgs = []
- self.tmp_containers = []
- self.tmp_folders = []
- self.tmp_volumes = []
- self.tmp_networks = []
-
- def tearDown(self):
- for img in self.tmp_imgs:
- try:
- self.client.remove_image(img)
- except docker.errors.APIError:
- pass
- for container in self.tmp_containers:
- try:
- self.client.stop(container, timeout=1)
- self.client.remove_container(container)
- except docker.errors.APIError:
- pass
- for network in self.tmp_networks:
- try:
- self.client.remove_network(network)
- except docker.errors.APIError:
- pass
- for folder in self.tmp_folders:
- shutil.rmtree(folder)
-
- for volume in self.tmp_volumes:
- try:
- self.client.remove_volume(volume)
- except docker.errors.APIError:
- pass
-
- self.client.close()
-
- def run_container(self, *args, **kwargs):
- container = self.client.create_container(*args, **kwargs)
- self.tmp_containers.append(container)
- self.client.start(container)
- exitcode = self.client.wait(container)
-
- if exitcode != 0:
- output = self.client.logs(container)
- raise Exception(
- "Container exited with code {}:\n{}"
- .format(exitcode, output))
-
- return container
-
- def create_and_start(self, image='busybox', command='top', **kwargs):
- container = self.client.create_container(
- image=image, command=command, **kwargs)
- self.tmp_containers.append(container)
- self.client.start(container)
- return container
-
- def execute(self, container, cmd, exit_code=0, **kwargs):
- exc = self.client.exec_create(container, cmd, **kwargs)
- output = self.client.exec_start(exc)
- actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
- msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
- " ".join(cmd), exit_code, actual_exit_code, output)
- assert actual_exit_code == exit_code, msg
+def requires_api_version(version):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
+ return pytest.mark.skipif(
+ docker.utils.version_lt(test_version, version),
+ reason="API version is too low (< {0})".format(version)
+ )
+
+
+def requires_experimental(until=None):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
+ def req_exp(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if not self.client.info()['ExperimentalBuild']:
+ pytest.skip('Feature requires Docker Engine experimental mode')
+ return f(self, *args, **kwargs)
+
+ if until and docker.utils.version_gte(test_version, until):
+ return f
+ return wrapped
+
+ return req_exp
+
+
+def wait_on_condition(condition, delay=0.1, timeout=40):
+ start_time = time.time()
+ while not condition():
+ if time.time() - start_time > timeout:
+ raise AssertionError("Timeout: %s" % condition)
+ time.sleep(delay)
+
+
+def random_name():
+ return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+
+def force_leave_swarm(client):
+ """Actually force leave a Swarm. There seems to be a bug in Swarm that
+ occasionally throws "context deadline exceeded" errors when leaving."""
+ while True:
+ try:
+ if isinstance(client, docker.DockerClient):
+ return client.swarm.leave(force=True)
+ return client.leave_swarm(force=True) # elif APIClient
+ except docker.errors.APIError as e:
+ if e.explanation == "context deadline exceeded":
+ continue
+ else:
+ return
+
+
+def swarm_listen_addr():
+ return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
+
+
+def assert_cat_socket_detached_with_keys(sock, inputs):
+ if six.PY3 and hasattr(sock, '_sock'):
+ sock = sock._sock
+
+ for i in inputs:
+ sock.sendall(i)
+ time.sleep(0.5)
+
+ # If we're using a Unix socket, the sock.sendall call will fail with a
+ # BrokenPipeError; INET sockets will just stop receiving / sending data
+ # but will not raise an error
+ if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
+ with pytest.raises(socket.error):
+ sock.sendall(b'make sure the socket is closed\n')
+ else:
+ sock.sendall(b"make sure the socket is closed\n")
+ assert sock.recv(32) == b''
+
+
+def ctrl_with(char):
+ if re.match('[a-z]', char):
+ return chr(ord(char) - ord('a') + 1).encode('ascii')
+ else:
+ raise Exception('char must be [a-z]')
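The rewritten helpers drop the unittest base classes in favor of standalone pytest utilities. A short usage sketch under stated assumptions (the helpers are importable as tests.helpers; the test names are hypothetical):

from tests.helpers import ctrl_with, random_name, requires_api_version


@requires_api_version('1.30')  # skipped when the daemon API is older
def test_hypothetical_feature():
    assert random_name().startswith('dockerpytest_')


def test_ctrl_sequences():
    # ctrl_with maps a letter to its control byte, e.g. Ctrl-C -> b'\x03'
    assert ctrl_with('c') == b'\x03'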
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
new file mode 100644
index 0000000..8910eb7
--- /dev/null
+++ b/tests/integration/api_build_test.py
@@ -0,0 +1,474 @@
+import io
+import os
+import shutil
+import tempfile
+
+from docker import errors
+
+import pytest
+import six
+
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import random_name, requires_api_version, requires_experimental
+
+
+class BuildTest(BaseAPIIntegrationTest):
+ def test_build_streaming(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+ stream = self.client.build(fileobj=script, decode=True)
+ logs = []
+ for chunk in stream:
+ logs.append(chunk)
+ assert len(logs) > 0
+
+ def test_build_from_stringio(self):
+ if six.PY3:
+ return
+ script = io.StringIO(six.text_type('\n').join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]))
+ stream = self.client.build(fileobj=script)
+ logs = ''
+ for chunk in stream:
+ if six.PY3:
+ chunk = chunk.decode('utf-8')
+ logs += chunk
+ assert logs != ''
+
+ def test_build_with_dockerignore(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write("\n".join([
+ 'ignored',
+ 'Dockerfile',
+ '.dockerignore',
+ '!ignored/subdir/excepted-file',
+ '', # empty line
+ '#*', # comment line
+ ]))
+
+ with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
+ f.write('this file should not be ignored')
+
+ subdir = os.path.join(base_dir, 'ignored', 'subdir')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'file'), 'w') as f:
+ f.write("this file should be ignored")
+
+ with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ tag = 'docker-py-test-build-with-dockerignore'
+ stream = self.client.build(
+ path=base_dir,
+ tag=tag,
+ )
+ for chunk in stream:
+ pass
+
+ c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
+ self.client.start(c)
+ self.client.wait(c)
+ logs = self.client.logs(c)
+
+ if six.PY3:
+ logs = logs.decode('utf-8')
+
+ assert sorted(list(filter(None, logs.split('\n')))) == sorted([
+ '/test/#file.txt',
+ '/test/ignored/subdir/excepted-file',
+ '/test/not-ignored'
+ ])
+
+ def test_build_with_buildargs(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'ARG test',
+ 'USER $test'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
+ )
+ self.tmp_imgs.append('buildargs')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('buildargs')
+ assert info['Config']['User'] == 'OK'
+
+ @requires_api_version('1.22')
+ def test_build_shmsize(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Hello, World!\'"',
+ ]).encode('ascii'))
+
+ tag = 'shmsize'
+ shmsize = 134217728
+
+ stream = self.client.build(
+ fileobj=script, tag=tag, shmsize=shmsize
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ # There is currently no way to get the shmsize
+ # that was used to build the image
+
+ @requires_api_version('1.24')
+ def test_build_isolation(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Deaf To All But The Song\'"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='isolation',
+ isolation='default'
+ )
+
+ for chunk in stream:
+ pass
+
+ @requires_api_version('1.23')
+ def test_build_labels(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ ]).encode('ascii'))
+
+ labels = {'test': 'OK'}
+
+ stream = self.client.build(
+ fileobj=script, tag='labels', labels=labels
+ )
+ self.tmp_imgs.append('labels')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('labels')
+ assert info['Config']['Labels'] == labels
+
+ @requires_api_version('1.25')
+ def test_build_with_cache_from(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'ENV FOO=bar',
+ 'RUN touch baz',
+ 'RUN touch bax',
+ ]).encode('ascii'))
+
+ stream = self.client.build(fileobj=script, tag='build1')
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['build1'],
+ decode=True
+ )
+ self.tmp_imgs.append('build2')
+ counter = 0
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 3
+ self.client.remove_image('build2')
+
+ counter = 0
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['nosuchtag'],
+ decode=True
+ )
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 0
+
+ @requires_api_version('1.29')
+ def test_build_container_with_target(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox as first',
+ 'RUN mkdir -p /tmp/test',
+ 'RUN touch /tmp/silence.tar.gz',
+ 'FROM alpine:latest',
+ 'WORKDIR /root/',
+ 'COPY --from=first /tmp/silence.tar.gz .',
+ 'ONBUILD RUN echo "This should not be in the final image"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, target='first', tag='build1'
+ )
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('build1')
+ assert not info['Config']['OnBuild']
+
+ @requires_api_version('1.25')
+ def test_build_with_network_mode(self):
+ # Set up pingable endpoint on custom network
+ network = self.client.create_network(random_name())['Id']
+ self.tmp_networks.append(network)
+ container = self.client.create_container(BUSYBOX, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.connect_container_to_network(
+ container, network, aliases=['pingtarget.docker']
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 pingtarget.docker'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, network_mode=network,
+ tag='dockerpytest_customnetbuild'
+ )
+
+ self.tmp_imgs.append('dockerpytest_customnetbuild')
+ for chunk in stream:
+ pass
+
+ assert self.client.inspect_image('dockerpytest_customnetbuild')
+
+ script.seek(0)
+ stream = self.client.build(
+ fileobj=script, network_mode='none',
+ tag='dockerpytest_nonebuild', nocache=True, decode=True
+ )
+
+ self.tmp_imgs.append('dockerpytest_nonebuild')
+ logs = [chunk for chunk in stream]
+ assert 'errorDetail' in logs[-1]
+ assert logs[-1]['errorDetail']['code'] == 1
+
+ with pytest.raises(errors.NotFound):
+ self.client.inspect_image('dockerpytest_nonebuild')
+
+ @requires_api_version('1.27')
+ def test_build_with_extra_hosts(self):
+ img_name = 'dockerpytest_extrahost_build'
+ self.tmp_imgs.append(img_name)
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 hello.world.test',
+ 'RUN ping -c1 extrahost.local.test',
+ 'RUN cp /etc/hosts /hosts-file'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag=img_name,
+ extra_hosts={
+ 'extrahost.local.test': '127.0.0.1',
+ 'hello.world.test': '127.0.0.1',
+ }, decode=True
+ )
+ for chunk in stream:
+ if 'errorDetail' in chunk:
+ pytest.fail(chunk)
+
+ assert self.client.inspect_image(img_name)
+ ctnr = self.run_container(img_name, 'cat /hosts-file')
+ self.tmp_containers.append(ctnr)
+ logs = self.client.logs(ctnr)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert '127.0.0.1\textrahost.local.test' in logs
+ assert '127.0.0.1\thello.world.test' in logs
+
+ @requires_experimental(until=None)
+ @requires_api_version('1.25')
+ def test_build_squash(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN echo blah > /file_1',
+ 'RUN echo blahblah > /file_2',
+ 'RUN echo blahblahblah > /file_3'
+ ]).encode('ascii'))
+
+ def build_squashed(squash):
+ tag = 'squash' if squash else 'nosquash'
+ stream = self.client.build(
+ fileobj=script, tag=tag, squash=squash
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ return self.client.inspect_image(tag)
+
+ non_squashed = build_squashed(False)
+ squashed = build_squashed(True)
+ assert len(non_squashed['RootFS']['Layers']) == 4
+ assert len(squashed['RootFS']['Layers']) == 2
+
+ def test_build_stderr_data(self):
+ control_chars = ['\x1b[91m', '\x1b[0m']
+ snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
+ script = io.BytesIO(b'\n'.join([
+ b'FROM busybox',
+ 'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
+ ]))
+
+ stream = self.client.build(
+ fileobj=script, decode=True, nocache=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk.get('stream'))
+ expected = '{0}{2}\n{1}'.format(
+ control_chars[0], control_chars[1], snippet
+ )
+ assert any([line == expected for line in lines])
+
+ def test_build_gzip_encoding(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True,
+ gzip=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_with_dockerfile_empty_lines(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('\n'.join([
+ ' ',
+ '',
+ '\t\t',
+ '\t ',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_gzip_custom_encoding(self):
+ with pytest.raises(errors.DockerException):
+ self.client.build(path='.', gzip=True, encoding='text/html')
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_build_invalid_platform(self):
+ script = io.BytesIO('FROM busybox\n'.encode('ascii'))
+
+ with pytest.raises(errors.APIError) as excinfo:
+ stream = self.client.build(fileobj=script, platform='foobar')
+ for _ in stream:
+ pass
+
+ assert excinfo.value.status_code == 400
+ assert 'invalid platform' in excinfo.exconly()
+
+ def test_build_out_of_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('.dockerignore\n')
+ df = tempfile.NamedTemporaryFile()
+ self.addCleanup(df.close)
+ df.write(('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ])).encode('utf-8'))
+ df.flush()
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=df.name, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 3
+ assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
+
+ def test_build_in_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ print(os.path.join(base_dir, 'custom.dockerfile'))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
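Every test above consumes build() the same way: it returns a generator over the daemon's JSON progress stream, which must be drained for the build to finish. A minimal sketch outside the test harness, assuming a reachable local daemon and the busybox image:

import io

import docker

client = docker.APIClient()  # assumes the default local socket
script = io.BytesIO(b'FROM busybox\nRUN echo hello\n')

# With decode=True each chunk is a dict: plain output arrives under the
# 'stream' key and failures under 'errorDetail', which is what the tests
# above assert against.
for chunk in client.build(fileobj=script, tag='build-sketch', decode=True):
    if 'stream' in chunk:
        print(chunk['stream'], end='')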
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
new file mode 100644
index 0000000..05281f8
--- /dev/null
+++ b/tests/integration/api_client_test.py
@@ -0,0 +1,117 @@
+import base64
+import os
+import tempfile
+import time
+import unittest
+import warnings
+
+import docker
+from docker.utils import kwargs_from_env
+
+from .base import BaseAPIIntegrationTest
+
+
+class InformationTest(BaseAPIIntegrationTest):
+ def test_version(self):
+ res = self.client.version()
+ assert 'GoVersion' in res
+ assert 'Version' in res
+
+ def test_info(self):
+ res = self.client.info()
+ assert 'Containers' in res
+ assert 'Images' in res
+ assert 'Debug' in res
+
+
+class LoadConfigTest(BaseAPIIntegrationTest):
+ def test_load_legacy_config(self):
+ folder = tempfile.mkdtemp()
+ self.tmp_folders.append(folder)
+ cfg_path = os.path.join(folder, '.dockercfg')
+ f = open(cfg_path, 'w')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ f.write('auth = {0}\n'.format(auth_))
+ f.write('email = sakuya@scarlet.net')
+ f.close()
+ cfg = docker.auth.load_config(cfg_path)
+ assert cfg[docker.auth.INDEX_NAME] is not None
+ cfg = cfg[docker.auth.INDEX_NAME]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('Auth') is None
+
+ def test_load_json_config(self):
+ folder = tempfile.mkdtemp()
+ self.tmp_folders.append(folder)
+ cfg_path = os.path.join(folder, '.dockercfg')
+ f = open(os.path.join(folder, '.dockercfg'), 'w')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ email_ = 'sakuya@scarlet.net'
+ f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format(
+ docker.auth.INDEX_URL, auth_, email_))
+ f.close()
+ cfg = docker.auth.load_config(cfg_path)
+ assert cfg[docker.auth.INDEX_URL] is not None
+ cfg = cfg[docker.auth.INDEX_URL]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('Auth') is None
+
+
+class AutoDetectVersionTest(unittest.TestCase):
+ def test_client_init(self):
+ client = docker.APIClient(version='auto', **kwargs_from_env())
+ client_version = client._version
+ api_version = client.version(api_version=False)['ApiVersion']
+ assert client_version == api_version
+ api_version_2 = client.version()['ApiVersion']
+ assert client_version == api_version_2
+ client.close()
+
+
+class ConnectionTimeoutTest(unittest.TestCase):
+ def setUp(self):
+ self.timeout = 0.5
+ self.client = docker.api.APIClient(
+ version=docker.constants.MINIMUM_DOCKER_API_VERSION,
+ base_url='http://192.168.10.2:4243',
+ timeout=self.timeout
+ )
+
+ def test_timeout(self):
+ start = time.time()
+ res = None
+ # This call isn't supposed to complete, and it should fail fast.
+ try:
+ res = self.client.inspect_container('id')
+ except:
+ pass
+ end = time.time()
+ assert res is None
+ assert end - start < 2 * self.timeout
+
+
+class UnixconnTest(unittest.TestCase):
+ """
+ Test UNIX socket connection adapter.
+ """
+
+ def test_resource_warnings(self):
+ """
+ Test that no warnings are produced when using the client.
+ """
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+
+ client = docker.APIClient(version='auto', **kwargs_from_env())
+ client.images()
+ client.close()
+ del client
+
+ assert len(w) == 0, "Unexpected warning produced: {0}".format(
+ w[0].message
+ )
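AutoDetectVersionTest exercises the version='auto' negotiation: the client asks the daemon for its ApiVersion before pinning itself to it. A minimal sketch, assuming kwargs_from_env() finds a reachable daemon:

import docker
from docker.utils import kwargs_from_env

client = docker.APIClient(version='auto', **kwargs_from_env())
# After negotiation the client uses the daemon's reported ApiVersion
# instead of the library's compiled-in default.
print(client.version()['ApiVersion'])
client.close()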
diff --git a/tests/integration/api_config_test.py b/tests/integration/api_config_test.py
new file mode 100644
index 0000000..0ffd767
--- /dev/null
+++ b/tests/integration/api_config_test.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.30')
+class ConfigAPITest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def test_create_config(self):
+ config_id = self.client.create_config(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_config_unicode_data(self):
+ config_id = self.client.create_config(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_config(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == config_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_config(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+
+ assert self.client.remove_config(config_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_config(config_id)
+
+ def test_list_configs(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+
+ data = self.client.configs(filters={'name': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == config_id['ID']
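Configs are a Swarm-only object, which is why the class-level setup joins a swarm and the teardown force-leaves it. A minimal sketch of the same API calls, assuming the daemon is already a swarm manager; the config name and payload are arbitrary:

import docker

client = docker.APIClient()  # assumes a swarm-manager daemon
config_id = client.create_config('example_config', b'example payload')
try:
    assert client.inspect_config(config_id)['Spec']['Name'] == 'example_config'
finally:
    client.remove_config(config_id)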
diff --git a/tests/integration/container_test.py b/tests/integration/api_container_test.py
index e390acb..e212518 100644
--- a/tests/integration/container_test.py
+++ b/tests/integration/api_container_test.py
@@ -1,45 +1,52 @@
import os
+import re
import signal
import tempfile
+import threading
+from datetime import datetime
import docker
from docker.constants import IS_WINDOWS_PLATFORM
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
+
import pytest
+
+import requests
import six
-from ..base import requires_api_version
+from .base import BUSYBOX, BaseAPIIntegrationTest
from .. import helpers
+from ..helpers import (
+ requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
+)
-BUSYBOX = helpers.BUSYBOX
-
-class ListContainersTest(helpers.BaseTestCase):
+class ListContainersTest(BaseAPIIntegrationTest):
def test_list_containers(self):
res0 = self.client.containers(all=True)
size = len(res0)
res1 = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res1)
+ assert 'Id' in res1
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
res2 = self.client.containers(all=True)
- self.assertEqual(size + 1, len(res2))
+ assert size + 1 == len(res2)
retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
- self.assertEqual(len(retrieved), 1)
+ assert len(retrieved) == 1
retrieved = retrieved[0]
- self.assertIn('Command', retrieved)
- self.assertEqual(retrieved['Command'], six.text_type('true'))
- self.assertIn('Image', retrieved)
- self.assertRegex(retrieved['Image'], r'busybox:.*')
- self.assertIn('Status', retrieved)
+ assert 'Command' in retrieved
+ assert retrieved['Command'] == six.text_type('true')
+ assert 'Image' in retrieved
+ assert re.search(r'busybox:.*', retrieved['Image'])
+ assert 'Status' in retrieved
-class CreateContainerTest(helpers.BaseTestCase):
+class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
def test_create_with_host_pid_mode(self):
@@ -48,14 +55,14 @@ class CreateContainerTest(helpers.BaseTestCase):
pid_mode='host', network_mode='none'
)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
- self.assertIn('HostConfig', inspect)
+ assert 'HostConfig' in inspect
host_config = inspect['HostConfig']
- self.assertIn('PidMode', host_config)
- self.assertEqual(host_config['PidMode'], 'host')
+ assert 'PidMode' in host_config
+ assert host_config['PidMode'] == 'host'
def test_create_with_links(self):
res0 = self.client.create_container(
@@ -96,15 +103,15 @@ class CreateContainerTest(helpers.BaseTestCase):
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
- self.assertEqual(self.client.wait(container3_id), 0)
+ assert self.client.wait(container3_id)['StatusCode'] == 0
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
- self.assertIn('{0}_NAME='.format(link_env_prefix1), logs)
- self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs)
- self.assertIn('{0}_NAME='.format(link_env_prefix2), logs)
- self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs)
+ assert '{0}_NAME='.format(link_env_prefix1) in logs
+ assert '{0}_ENV_FOO=1'.format(link_env_prefix1) in logs
+ assert '{0}_NAME='.format(link_env_prefix2) in logs
+ assert '{0}_ENV_FOO=1'.format(link_env_prefix2) in logs
def test_create_with_restart_policy(self):
container = self.client.create_container(
@@ -117,12 +124,10 @@ class CreateContainerTest(helpers.BaseTestCase):
id = container['Id']
self.client.start(id)
self.client.wait(id)
- with self.assertRaises(docker.errors.APIError) as exc:
+ with pytest.raises(docker.errors.APIError) as exc:
self.client.remove_container(id)
- err = exc.exception.response.text
- self.assertIn(
- 'You cannot remove a running container', err
- )
+ err = exc.value.explanation
+ assert 'You cannot remove ' in err
self.client.remove_container(id, force=True)
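The assertion rewrite above also reflects an error-handling change: 3.x surfaces the daemon's message through APIError.explanation instead of the raw response body. A minimal sketch, assuming a local daemon and a container name that does not exist:

import docker

client = docker.APIClient()  # assumes a local daemon
try:
    client.remove_container('no_such_container')
except docker.errors.APIError as e:
    # .status_code and .explanation carry the HTTP status and the daemon's
    # human-readable message, e.g. 404 and 'No such container: ...'
    print(e.status_code, e.explanation)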
def test_create_container_with_volumes_from(self):
@@ -141,23 +146,19 @@ class CreateContainerTest(helpers.BaseTestCase):
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
- with self.assertRaises(docker.errors.DockerException):
- self.client.create_container(
- BUSYBOX, 'cat', detach=True, stdin_open=True,
- volumes_from=vol_names
- )
- res2 = self.client.create_container(
+
+ res = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
)
- container3_id = res2['Id']
+ container3_id = res['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
- info = self.client.inspect_container(res2['Id'])
- self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
+ info = self.client.inspect_container(res['Id'])
+ assert len(info['HostConfig']['VolumesFrom']) == len(vol_names)
def create_container_readonly_fs(self):
ctnr = self.client.create_container(
@@ -166,19 +167,19 @@ class CreateContainerTest(helpers.BaseTestCase):
read_only=True, network_mode='none'
)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
- res = self.client.wait(ctnr)
- self.assertNotEqual(res, 0)
+ res = self.client.wait(ctnr)['StatusCode']
+ assert res != 0
def create_container_with_name(self):
res = self.client.create_container(BUSYBOX, 'true', name='foobar')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Name', inspect)
- self.assertEqual('/foobar', inspect['Name'])
+ assert 'Name' in inspect
+ assert '/foobar' == inspect['Name']
def create_container_privileged(self):
res = self.client.create_container(
@@ -186,24 +187,24 @@ class CreateContainerTest(helpers.BaseTestCase):
privileged=True, network_mode='none'
)
)
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Config', inspect)
- self.assertIn('Id', inspect)
- self.assertTrue(inspect['Id'].startswith(res['Id']))
- self.assertIn('Image', inspect)
- self.assertIn('State', inspect)
- self.assertIn('Running', inspect['State'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
if not inspect['State']['Running']:
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], 0)
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
# Since Nov 2013, the Privileged flag is no longer part of the
# container's config exposed via the API (safety concerns?).
#
if 'Privileged' in inspect['Config']:
- self.assertEqual(inspect['Config']['Privileged'], True)
+ assert inspect['Config']['Privileged'] is True
def test_create_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
@@ -214,12 +215,10 @@ class CreateContainerTest(helpers.BaseTestCase):
self.client.start(container)
res = self.client.inspect_container(container['Id'])
- self.assertEqual(mac_address_expected,
- res['NetworkSettings']['MacAddress'])
+ assert mac_address_expected == res['NetworkSettings']['MacAddress']
self.client.kill(id)
- @requires_api_version('1.20')
def test_group_id_ints(self):
container = self.client.create_container(
BUSYBOX, 'id -G',
@@ -233,10 +232,9 @@ class CreateContainerTest(helpers.BaseTestCase):
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
- self.assertIn('1000', groups)
- self.assertIn('1001', groups)
+ assert '1000' in groups
+ assert '1001' in groups
- @requires_api_version('1.20')
def test_group_id_strings(self):
container = self.client.create_container(
BUSYBOX, 'id -G', host_config=self.client.create_host_config(
@@ -252,11 +250,11 @@ class CreateContainerTest(helpers.BaseTestCase):
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
- self.assertIn('1000', groups)
- self.assertIn('1001', groups)
+ assert '1000' in groups
+ assert '1001' in groups
def test_valid_log_driver_and_log_opt(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type='json-file',
config={'max-file': '100'}
)
@@ -271,11 +269,11 @@ class CreateContainerTest(helpers.BaseTestCase):
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
- self.assertEqual(container_log_config['Type'], log_config.type)
- self.assertEqual(container_log_config['Config'], log_config.config)
+ assert container_log_config['Type'] == log_config.type
+ assert container_log_config['Config'] == log_config.config
def test_invalid_log_driver_raises_exception(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type='asdf-nope',
config={}
)
@@ -290,10 +288,10 @@ class CreateContainerTest(helpers.BaseTestCase):
)
self.client.start(container)
- assert six.b(expected_msg) in excinfo.value.explanation
+ assert excinfo.value.explanation == expected_msg
def test_valid_no_log_driver_specified(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type="",
config={'max-file': '100'}
)
@@ -308,11 +306,11 @@ class CreateContainerTest(helpers.BaseTestCase):
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
- self.assertEqual(container_log_config['Type'], "json-file")
- self.assertEqual(container_log_config['Config'], log_config.config)
+ assert container_log_config['Type'] == "json-file"
+ assert container_log_config['Config'] == log_config.config
def test_valid_no_config_specified(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type="json-file",
config=None
)
@@ -327,8 +325,8 @@ class CreateContainerTest(helpers.BaseTestCase):
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
- self.assertEqual(container_log_config['Type'], "json-file")
- self.assertEqual(container_log_config['Config'], {})
+ assert container_log_config['Type'] == "json-file"
+ assert container_log_config['Config'] == {}
def test_create_with_memory_constraints_with_str(self):
ctnr = self.client.create_container(
@@ -338,36 +336,29 @@ class CreateContainerTest(helpers.BaseTestCase):
mem_limit='700M'
)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
- self.assertIn('HostConfig', inspect)
+ assert 'HostConfig' in inspect
host_config = inspect['HostConfig']
for limit in ['Memory', 'MemorySwap']:
- self.assertIn(limit, host_config)
+ assert limit in host_config
def test_create_with_memory_constraints_with_int(self):
ctnr = self.client.create_container(
BUSYBOX, 'true',
host_config=self.client.create_host_config(mem_swappiness=40)
)
- self.assertIn('Id', ctnr)
+ assert 'Id' in ctnr
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
- self.assertIn('HostConfig', inspect)
+ assert 'HostConfig' in inspect
host_config = inspect['HostConfig']
- self.assertIn('MemorySwappiness', host_config)
-
- def test_create_host_config_exception_raising(self):
- self.assertRaises(TypeError,
- self.client.create_host_config, mem_swappiness='40')
-
- self.assertRaises(ValueError,
- self.client.create_host_config, pid_mode='40')
+ assert 'MemorySwappiness' in host_config
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
@@ -398,8 +389,109 @@ class CreateContainerTest(helpers.BaseTestCase):
config = self.client.inspect_container(container)
assert config['HostConfig']['Tmpfs'] == tmpfs
+ @requires_api_version('1.24')
+ def test_create_with_isolation(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo'], host_config=self.client.create_host_config(
+ isolation='default'
+ )
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Isolation'] == 'default'
+
+ @requires_api_version('1.25')
+ def test_create_with_auto_remove(self):
+ host_config = self.client.create_host_config(
+ auto_remove=True
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['AutoRemove'] is True
-class VolumeBindTest(helpers.BaseTestCase):
+ @requires_api_version('1.25')
+ def test_create_with_stop_timeout(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], stop_timeout=25
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['Config']['StopTimeout'] == 25
+
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ def test_create_with_storage_opt(self):
+ host_config = self.client.create_host_config(
+ storage_opt={'size': '120G'}
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container)
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['StorageOpt'] == {
+ 'size': '120G'
+ }
+
+ @requires_api_version('1.25')
+ def test_create_with_init(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ init=True
+ )
+ )
+ self.tmp_containers.append(ctnr['Id'])
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['Init'] is True
+
+ @pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
+ @requires_api_version('1.25')
+ def test_create_with_init_path(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ init_path="/usr/libexec/docker-init"
+ )
+ )
+ self.tmp_containers.append(ctnr['Id'])
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
+
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu/cpu.rt_runtime_us'),
+ reason='CONFIG_RT_GROUP_SCHED isn\'t enabled')
+ def test_create_with_cpu_rt_options(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true', host_config=self.client.create_host_config(
+ cpu_rt_period=1000, cpu_rt_runtime=500
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['CpuRealtimeRuntime'] == 500
+ assert config['HostConfig']['CpuRealtimePeriod'] == 1000
+
+ @requires_api_version('1.28')
+ def test_create_with_device_cgroup_rules(self):
+ rule = 'c 7:128 rwm'
+ ctnr = self.client.create_container(
+ BUSYBOX, 'cat /sys/fs/cgroup/devices/devices.list',
+ host_config=self.client.create_host_config(
+ device_cgroup_rules=[rule]
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['DeviceCgroupRules'] == [rule]
+ self.client.start(ctnr)
+ assert rule in self.client.logs(ctnr).decode('utf-8')
+
+
+class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
super(VolumeBindTest, self).setUp()
@@ -429,7 +521,7 @@ class VolumeBindTest(helpers.BaseTestCase):
if six.PY3:
logs = logs.decode('utf-8')
- self.assertIn(self.filename, logs)
+ assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@@ -451,30 +543,86 @@ class VolumeBindTest(helpers.BaseTestCase):
if six.PY3:
logs = logs.decode('utf-8')
- self.assertIn(self.filename, logs)
+ assert self.filename in logs
+
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, False)
+
+ @pytest.mark.xfail(
+ IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
+ )
+ @requires_api_version('1.30')
+ def test_create_with_mounts(self):
+ mount = docker.types.Mount(
+ type="bind", source=self.mount_origin, target=self.mount_dest
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.run_container(
+ BUSYBOX, ['ls', self.mount_dest],
+ host_config=host_config
+ )
+ assert container
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, True)
+ @pytest.mark.xfail(
+ IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
+ )
+ @requires_api_version('1.30')
+ def test_create_with_mounts_ro(self):
+ mount = docker.types.Mount(
+ type="bind", source=self.mount_origin, target=self.mount_dest,
+ read_only=True
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.run_container(
+ BUSYBOX, ['ls', self.mount_dest],
+ host_config=host_config
+ )
+ assert container
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
+ @requires_api_version('1.30')
+ def test_create_with_volume_mount(self):
+ mount = docker.types.Mount(
+ type="volume", source=helpers.random_name(),
+ target=self.mount_dest, labels={'com.dockerpy.test': 'true'}
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.client.create_container(
+ BUSYBOX, ['true'], host_config=host_config,
+ )
+ assert container
+ inspect_data = self.client.inspect_container(container)
+ assert 'Mounts' in inspect_data
+ filtered = list(filter(
+ lambda x: x['Destination'] == self.mount_dest,
+ inspect_data['Mounts']
+ ))
+ assert len(filtered) == 1
+ mount_data = filtered[0]
+ assert mount['Source'] == mount_data['Name']
+ assert mount_data['RW'] is True
+
def check_container_data(self, inspect_data, rw):
- if docker.utils.compare_version('1.20', self.client._version) < 0:
- self.assertIn('Volumes', inspect_data)
- self.assertIn(self.mount_dest, inspect_data['Volumes'])
- self.assertEqual(
- self.mount_origin, inspect_data['Volumes'][self.mount_dest]
- )
- self.assertIn(self.mount_dest, inspect_data['VolumesRW'])
- self.assertFalse(inspect_data['VolumesRW'][self.mount_dest])
- else:
- self.assertIn('Mounts', inspect_data)
- filtered = list(filter(
- lambda x: x['Destination'] == self.mount_dest,
- inspect_data['Mounts']
- ))
- self.assertEqual(len(filtered), 1)
- mount_data = filtered[0]
- self.assertEqual(mount_data['Source'], self.mount_origin)
- self.assertEqual(mount_data['RW'], rw)
+ assert 'Mounts' in inspect_data
+ filtered = list(filter(
+ lambda x: x['Destination'] == self.mount_dest,
+ inspect_data['Mounts']
+ ))
+ assert len(filtered) == 1
+ mount_data = filtered[0]
+ assert mount_data['Source'] == self.mount_origin
+ assert mount_data['RW'] == rw
def run_with_volume(self, ro, *args, **kwargs):
return self.run_container(
@@ -493,8 +641,7 @@ class VolumeBindTest(helpers.BaseTestCase):
)
-@requires_api_version('1.20')
-class ArchiveTest(helpers.BaseTestCase):
+class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
@@ -512,7 +659,7 @@ class ArchiveTest(helpers.BaseTestCase):
retrieved_data = helpers.untar_file(destination, 'data.txt')
if six.PY3:
retrieved_data = retrieved_data.decode('utf-8')
- self.assertEqual(data, retrieved_data.strip())
+ assert data == retrieved_data.strip()
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
@@ -524,10 +671,10 @@ class ArchiveTest(helpers.BaseTestCase):
self.client.start(ctnr)
self.client.wait(ctnr)
strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
- self.assertIn('name', stat)
- self.assertEqual(stat['name'], 'data.txt')
- self.assertIn('size', stat)
- self.assertEqual(stat['size'], len(data))
+ assert 'name' in stat
+ assert stat['name'] == 'data.txt'
+ assert 'size' in stat
+ assert stat['size'] == len(data)
def test_copy_file_to_container(self):
data = b'Deaf To All But The Song'
@@ -550,7 +697,7 @@ class ArchiveTest(helpers.BaseTestCase):
if six.PY3:
logs = logs.decode('utf-8')
data = data.decode('utf-8')
- self.assertEqual(logs.strip(), data)
+ assert logs.strip() == data
def test_copy_directory_to_container(self):
files = ['a.py', 'b.py', 'foo/b.py']
@@ -568,60 +715,60 @@ class ArchiveTest(helpers.BaseTestCase):
if six.PY3:
logs = logs.decode('utf-8')
results = logs.strip().split()
- self.assertIn('a.py', results)
- self.assertIn('b.py', results)
- self.assertIn('foo/', results)
- self.assertIn('bar/', results)
+ assert 'a.py' in results
+ assert 'b.py' in results
+ assert 'foo/' in results
+ assert 'bar/' in results
-class RenameContainerTest(helpers.BaseTestCase):
+class RenameContainerTest(BaseAPIIntegrationTest):
def test_rename_container(self):
version = self.client.version()['Version']
name = 'hong_meiling'
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Name', inspect)
+ assert 'Name' in inspect
if version == '1.5.0':
- self.assertEqual(name, inspect['Name'])
+ assert name == inspect['Name']
else:
- self.assertEqual('/{0}'.format(name), inspect['Name'])
+ assert '/{0}'.format(name) == inspect['Name']
-class StartContainerTest(helpers.BaseTestCase):
+class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Config', inspect)
- self.assertIn('Id', inspect)
- self.assertTrue(inspect['Id'].startswith(res['Id']))
- self.assertIn('Image', inspect)
- self.assertIn('State', inspect)
- self.assertIn('Running', inspect['State'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
if not inspect['State']['Running']:
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], 0)
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
def test_start_container_with_dict_instead_of_id(self):
res = self.client.create_container(BUSYBOX, 'true')
- self.assertIn('Id', res)
+ assert 'Id' in res
self.tmp_containers.append(res['Id'])
self.client.start(res)
inspect = self.client.inspect_container(res['Id'])
- self.assertIn('Config', inspect)
- self.assertIn('Id', inspect)
- self.assertTrue(inspect['Id'].startswith(res['Id']))
- self.assertIn('Image', inspect)
- self.assertIn('State', inspect)
- self.assertIn('Running', inspect['State'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
if not inspect['State']['Running']:
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], 0)
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
def test_run_shlex_commands(self):
commands = [
@@ -640,39 +787,56 @@ class StartContainerTest(helpers.BaseTestCase):
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0, msg=cmd)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0, cmd
-class WaitTest(helpers.BaseTestCase):
+class WaitTest(BaseAPIIntegrationTest):
def test_wait(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
inspect = self.client.inspect_container(id)
- self.assertIn('Running', inspect['State'])
- self.assertEqual(inspect['State']['Running'], False)
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], exitcode)
+ assert 'Running' in inspect['State']
+ assert inspect['State']['Running'] is False
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == exitcode
def test_wait_with_dict_instead_of_id(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
- exitcode = self.client.wait(res)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(res)['StatusCode']
+ assert exitcode == 0
inspect = self.client.inspect_container(res)
- self.assertIn('Running', inspect['State'])
- self.assertEqual(inspect['State']['Running'], False)
- self.assertIn('ExitCode', inspect['State'])
- self.assertEqual(inspect['State']['ExitCode'], exitcode)
+ assert 'Running' in inspect['State']
+ assert inspect['State']['Running'] is False
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == exitcode
+
+ @requires_api_version('1.30')
+ def test_wait_with_condition(self):
+ ctnr = self.client.create_container(BUSYBOX, 'true')
+ self.tmp_containers.append(ctnr)
+ with pytest.raises(requests.exceptions.ConnectionError):
+ self.client.wait(ctnr, condition='removed', timeout=1)
+
+ ctnr = self.client.create_container(
+ BUSYBOX, ['sleep', '3'],
+ host_config=self.client.create_host_config(auto_remove=True)
+ )
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ assert self.client.wait(
+ ctnr, condition='removed', timeout=5
+ )['StatusCode'] == 0
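Every wait() call site in this file changes the same way: the integer exit code of 1.x became a dict in 3.x (API 1.30 adds the 'condition' parameter seen above). A minimal sketch of the new return shape, assuming a local daemon with busybox available:

import docker

client = docker.APIClient()  # assumes a local daemon
ctnr = client.create_container('busybox', 'true')
client.start(ctnr)
result = client.wait(ctnr)  # e.g. {'StatusCode': 0, 'Error': None}
assert result['StatusCode'] == 0
client.remove_container(ctnr)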
-class LogsTest(helpers.BaseTestCase):
+class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
@@ -681,10 +845,10 @@ class LogsTest(helpers.BaseTestCase):
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(id)
- self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
+ assert logs == (snippet + '\n').encode(encoding='ascii')
def test_logs_tail_option(self):
snippet = '''Line1
@@ -695,10 +859,10 @@ Line2'''
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(id, tail=1)
- self.assertEqual(logs, 'Line2\n'.encode(encoding='ascii'))
+ assert logs == 'Line2\n'.encode(encoding='ascii')
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
@@ -712,10 +876,29 @@ Line2'''
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+
+ assert logs == (snippet + '\n').encode(encoding='ascii')
+
+ @pytest.mark.timeout(5)
+ def test_logs_streaming_and_follow_and_cancel(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ logs = six.binary_type()
+
+ generator = self.client.logs(id, stream=True, follow=True)
+ threading.Timer(1, generator.close).start()
- self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
+ for chunk in generator:
+ logs += chunk
+
+ assert logs == (snippet + '\n').encode(encoding='ascii')
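The cancellation test relies on a behavior worth spelling out: closing the log generator detaches the underlying HTTP stream, so iteration stops once already-buffered output is drained instead of following the container for its full lifetime. A minimal sketch, assuming a local daemon:

import threading

import docker

client = docker.APIClient()  # assumes a local daemon
ctnr = client.create_container('busybox', 'sh -c "echo hi && sleep 3"')
client.start(ctnr)

stream = client.logs(ctnr, stream=True, follow=True)
threading.Timer(1, stream.close).start()  # detach after one second
for chunk in stream:  # ends early once the stream is closed
    print(chunk)
client.remove_container(ctnr, force=True)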
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
@@ -725,10 +908,10 @@ Line2'''
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(container)
- self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
+ assert logs == (snippet + '\n').encode(encoding='ascii')
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
@@ -738,41 +921,57 @@ Line2'''
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
logs = self.client.logs(id, tail=0)
- self.assertEqual(logs, ''.encode(encoding='ascii'))
+ assert logs == ''.encode(encoding='ascii')
+
+ @requires_api_version('1.35')
+ def test_logs_with_until(self):
+ snippet = 'Shanghai Teahouse (Hong Meiling)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo "{0}"'.format(snippet)
+ )
+
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+ assert exitcode == 0
+ logs_until_1 = self.client.logs(container, until=1)
+ assert logs_until_1 == b''
+ logs_until_now = self.client.logs(container, until=datetime.now())
+ assert logs_until_now == (snippet + '\n').encode(encoding='ascii')
-class DiffTest(helpers.BaseTestCase):
+class DiffTest(BaseAPIIntegrationTest):
def test_diff(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
diff = self.client.diff(id)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
- self.assertEqual(len(test_diff), 1)
- self.assertIn('Kind', test_diff[0])
- self.assertEqual(test_diff[0]['Kind'], 1)
+ assert len(test_diff) == 1
+ assert 'Kind' in test_diff[0]
+ assert test_diff[0]['Kind'] == 1
def test_diff_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
- exitcode = self.client.wait(id)
- self.assertEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
diff = self.client.diff(container)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
- self.assertEqual(len(test_diff), 1)
- self.assertIn('Kind', test_diff[0])
- self.assertEqual(test_diff[0]['Kind'], 1)
+ assert len(test_diff) == 1
+ assert 'Kind' in test_diff[0]
+ assert test_diff[0]['Kind'] == 1
-class StopTest(helpers.BaseTestCase):
+class StopTest(BaseAPIIntegrationTest):
def test_stop(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -780,26 +979,26 @@ class StopTest(helpers.BaseTestCase):
self.tmp_containers.append(id)
self.client.stop(id, timeout=2)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'Running' in state
+ assert state['Running'] is False
def test_stop_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
- self.assertIn('Id', container)
+ assert 'Id' in container
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
self.client.stop(container, timeout=2)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'Running' in state
+ assert state['Running'] is False
-class KillTest(helpers.BaseTestCase):
+class KillTest(BaseAPIIntegrationTest):
def test_kill(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -807,12 +1006,12 @@ class KillTest(helpers.BaseTestCase):
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False
def test_kill_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
@@ -821,12 +1020,12 @@ class KillTest(helpers.BaseTestCase):
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False
def test_kill_with_signal(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
@@ -835,48 +1034,48 @@ class KillTest(helpers.BaseTestCase):
self.client.kill(
id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9
)
- exitcode = self.client.wait(id)
- self.assertNotEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False, state)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
def test_kill_with_signal_name(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal='SIGKILL')
- exitcode = self.client.wait(id)
- self.assertNotEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False, state)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
def test_kill_with_signal_integer(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=9)
- exitcode = self.client.wait(id)
- self.assertNotEqual(exitcode, 0)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertNotEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], False, state)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
-class PortTest(helpers.BaseTestCase):
+class PortTest(BaseAPIIntegrationTest):
def test_port(self):
port_bindings = {
@@ -901,13 +1100,13 @@ class PortTest(helpers.BaseTestCase):
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
- self.assertEqual(ip, port_bindings[port][0])
- self.assertEqual(host_port, port_bindings[port][1])
+ assert ip == port_bindings[port][0]
+ assert host_port == port_bindings[port][1]
self.client.kill(id)
-class ContainerTopTest(helpers.BaseTestCase):
+class ContainerTopTest(BaseAPIIntegrationTest):
def test_top(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60']
@@ -938,57 +1137,56 @@ class ContainerTopTest(helpers.BaseTestCase):
self.client.start(container)
res = self.client.top(container, 'waux')
- self.assertEqual(
- res['Titles'],
- ['USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
- 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'],
- )
- self.assertEqual(len(res['Processes']), 1)
- self.assertEqual(res['Processes'][0][10], 'sleep 60')
+ assert res['Titles'] == [
+ 'USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
+ 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'
+ ]
+ assert len(res['Processes']) == 1
+ assert res['Processes'][0][10] == 'sleep 60'
-class RestartContainerTest(helpers.BaseTestCase):
+class RestartContainerTest(BaseAPIIntegrationTest):
def test_restart(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
- self.assertIn('State', info)
- self.assertIn('StartedAt', info['State'])
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
start_time1 = info['State']['StartedAt']
self.client.restart(id, timeout=2)
info2 = self.client.inspect_container(id)
- self.assertIn('State', info2)
- self.assertIn('StartedAt', info2['State'])
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
start_time2 = info2['State']['StartedAt']
- self.assertNotEqual(start_time1, start_time2)
- self.assertIn('Running', info2['State'])
- self.assertEqual(info2['State']['Running'], True)
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
self.client.kill(id)
def test_restart_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
- self.assertIn('Id', container)
+ assert 'Id' in container
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
- self.assertIn('State', info)
- self.assertIn('StartedAt', info['State'])
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
start_time1 = info['State']['StartedAt']
self.client.restart(container, timeout=2)
info2 = self.client.inspect_container(id)
- self.assertIn('State', info2)
- self.assertIn('StartedAt', info2['State'])
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
start_time2 = info2['State']['StartedAt']
- self.assertNotEqual(start_time1, start_time2)
- self.assertIn('Running', info2['State'])
- self.assertEqual(info2['State']['Running'], True)
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
self.client.kill(id)
-class RemoveContainerTest(helpers.BaseTestCase):
+class RemoveContainerTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
@@ -997,7 +1195,7 @@ class RemoveContainerTest(helpers.BaseTestCase):
self.client.remove_container(id)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
- self.assertEqual(len(res), 0)
+ assert len(res) == 0
def test_remove_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['true'])
@@ -1007,10 +1205,10 @@ class RemoveContainerTest(helpers.BaseTestCase):
self.client.remove_container(container)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
- self.assertEqual(len(res), 0)
+ assert len(res) == 0
-class AttachContainerTest(helpers.BaseTestCase):
+class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
container = self.client.create_container(BUSYBOX, '/bin/sh',
detach=True, stdin_open=True)
@@ -1018,7 +1216,7 @@ class AttachContainerTest(helpers.BaseTestCase):
self.tmp_containers.append(id)
self.client.start(id)
sock = self.client.attach_socket(container, ws=False)
- self.assertTrue(sock.fileno() > -1)
+ assert sock.fileno() > -1
def test_run_container_reading_socket(self):
line = 'hi there and stuff and things, words!'
@@ -1026,22 +1224,100 @@ class AttachContainerTest(helpers.BaseTestCase):
command = "printf '{0}'".format(line)
container = self.client.create_container(BUSYBOX, command,
detach=True, tty=False)
- ident = container['Id']
- self.tmp_containers.append(ident)
+ self.tmp_containers.append(container)
opts = {"stdout": 1, "stream": 1, "logs": 1}
- pty_stdout = self.client.attach_socket(ident, opts)
+ pty_stdout = self.client.attach_socket(container, opts)
self.addCleanup(pty_stdout.close)
- self.client.start(ident)
+ self.client.start(container)
next_size = next_frame_size(pty_stdout)
- self.assertEqual(next_size, len(line))
+ assert next_size == len(line)
data = read_exactly(pty_stdout, next_size)
- self.assertEqual(data.decode('utf-8'), line)
+ assert data.decode('utf-8') == line
+ def test_attach_no_stream(self):
+ container = self.client.create_container(
+ BUSYBOX, 'echo hello'
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ output = self.client.attach(container, stream=False, logs=True)
+ assert output == 'hello\n'.encode(encoding='ascii')
+
+ @pytest.mark.timeout(5)
+ def test_attach_stream_and_cancel(self):
+ container = self.client.create_container(
+ BUSYBOX, 'sh -c "echo hello && sleep 60"',
+ tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ output = self.client.attach(container, stream=True, logs=True)
+
+ threading.Timer(1, output.close).start()
+
+ lines = []
+ for line in output:
+ lines.append(line)
+
+ assert len(lines) == 1
+ assert lines[0] == 'hello\r\n'.encode(encoding='ascii')
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True}
+ )
+
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True}
+ )
-class PauseTest(helpers.BaseTestCase):
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+ def test_detach_with_arg(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True, 'detachKeys': 'ctrl-x'}
+ )
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
+
+
+class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -1049,29 +1325,44 @@ class PauseTest(helpers.BaseTestCase):
self.client.start(container)
self.client.pause(id)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], True)
- self.assertIn('Paused', state)
- self.assertEqual(state['Paused'], True)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] == 0
+ assert 'Running' in state
+ assert state['Running'] is True
+ assert 'Paused' in state
+ assert state['Paused'] is True
self.client.unpause(id)
container_info = self.client.inspect_container(id)
- self.assertIn('State', container_info)
+ assert 'State' in container_info
state = container_info['State']
- self.assertIn('ExitCode', state)
- self.assertEqual(state['ExitCode'], 0)
- self.assertIn('Running', state)
- self.assertEqual(state['Running'], True)
- self.assertIn('Paused', state)
- self.assertEqual(state['Paused'], False)
+ assert 'ExitCode' in state
+ assert state['ExitCode'] == 0
+ assert 'Running' in state
+ assert state['Running'] is True
+ assert 'Paused' in state
+ assert state['Paused'] is False
+
+
+class PruneTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.25')
+ def test_prune_containers(self):
+ container1 = self.client.create_container(
+ BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
+ )
+ container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.client.start(container1)
+ self.client.start(container2)
+ self.client.wait(container1)
+ result = self.client.prune_containers()
+ assert container1['Id'] in result['ContainersDeleted']
+ assert result['SpaceReclaimed'] > 0
+ assert container2['Id'] not in result['ContainersDeleted']
-class GetContainerStatsTest(helpers.BaseTestCase):
- @requires_api_version('1.19')
+class GetContainerStatsTest(BaseAPIIntegrationTest):
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
@@ -1081,12 +1372,11 @@ class GetContainerStatsTest(helpers.BaseTestCase):
response = self.client.stats(container, stream=0)
self.client.kill(container)
- self.assertEqual(type(response), dict)
+ assert type(response) == dict
for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
- self.assertIn(key, response)
+ assert key in response
- @requires_api_version('1.17')
def test_get_container_stats_stream(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
@@ -1095,13 +1385,13 @@ class GetContainerStatsTest(helpers.BaseTestCase):
self.client.start(container)
stream = self.client.stats(container)
for chunk in stream:
- self.assertEqual(type(chunk), dict)
+ assert type(chunk) == dict
for key in ['read', 'network', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
- self.assertIn(key, chunk)
+ assert key in chunk
-class ContainerUpdateTest(helpers.BaseTestCase):
+class ContainerUpdateTest(BaseAPIIntegrationTest):
@requires_api_version('1.22')
def test_update_container(self):
old_mem_limit = 400 * 1024 * 1024
@@ -1115,11 +1405,40 @@ class ContainerUpdateTest(helpers.BaseTestCase):
self.client.start(container)
self.client.update_container(container, mem_limit=new_mem_limit)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)
+ assert inspect_data['HostConfig']['Memory'] == new_mem_limit
+
+ @requires_api_version('1.23')
+ def test_restart_policy_update(self):
+ old_restart_policy = {
+ 'MaximumRetryCount': 0,
+ 'Name': 'always'
+ }
+ new_restart_policy = {
+ 'MaximumRetryCount': 42,
+ 'Name': 'on-failure'
+ }
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'],
+ host_config=self.client.create_host_config(
+ restart_policy=old_restart_policy
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.update_container(container,
+ restart_policy=new_restart_policy)
+ inspect_data = self.client.inspect_container(container)
+ assert (
+ inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'] ==
+ new_restart_policy['MaximumRetryCount']
+ )
+ assert (
+ inspect_data['HostConfig']['RestartPolicy']['Name'] ==
+ new_restart_policy['Name']
+ )
-class ContainerCPUTest(helpers.BaseTestCase):
- @requires_api_version('1.18')
+class ContainerCPUTest(BaseAPIIntegrationTest):
def test_container_cpu_shares(self):
cpu_shares = 512
container = self.client.create_container(
@@ -1130,9 +1449,8 @@ class ContainerCPUTest(helpers.BaseTestCase):
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512)
+ assert inspect_data['HostConfig']['CpuShares'] == 512
- @requires_api_version('1.18')
def test_container_cpuset(self):
cpuset_cpus = "0,1"
container = self.client.create_container(
@@ -1143,4 +1461,55 @@ class ContainerCPUTest(helpers.BaseTestCase):
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
- self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)
+ assert inspect_data['HostConfig']['CpusetCpus'] == cpuset_cpus
+
+ @requires_api_version('1.25')
+ def test_create_with_runtime(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], runtime='runc'
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Runtime'] == 'runc'
+
+
+class LinkTest(BaseAPIIntegrationTest):
+ def test_remove_link(self):
+ # Create containers
+ container1 = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ container1_id = container1['Id']
+ self.tmp_containers.append(container1_id)
+ self.client.start(container1_id)
+
+ # Create Link
+ # inspect_container returns the name with a leading '/'; strip it
+ link_path = self.client.inspect_container(container1_id)['Name'][1:]
+ link_alias = 'mylink'
+
+ container2 = self.client.create_container(
+ BUSYBOX, 'cat', host_config=self.client.create_host_config(
+ links={link_path: link_alias}
+ )
+ )
+ container2_id = container2['Id']
+ self.tmp_containers.append(container2_id)
+ self.client.start(container2_id)
+
+ # Remove link
+ linked_name = self.client.inspect_container(container2_id)['Name'][1:]
+ link_name = '%s/%s' % (linked_name, link_alias)
+ self.client.remove_container(link_name, link=True)
+
+ # Link is gone
+ containers = self.client.containers(all=True)
+ retrieved = [x for x in containers if link_name in x['Names']]
+ assert len(retrieved) == 0
+
+ # Containers are still there
+ retrieved = [
+ x for x in containers if x['Id'].startswith(container1_id) or
+ x['Id'].startswith(container2_id)
+ ]
+ assert len(retrieved) == 2
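These container tests track the docker-py 3.x API change where APIClient.wait() returns a dict rather than a bare integer, hence the repeated `self.client.wait(id)['StatusCode']` pattern above. A minimal sketch of the new contract, assuming a reachable local daemon and the busybox image:

    import docker

    client = docker.APIClient()                    # default local socket
    ctnr = client.create_container('busybox', ['true'])
    client.start(ctnr)
    result = client.wait(ctnr)                     # e.g. {'StatusCode': 0}
    assert result['StatusCode'] == 0
    client.remove_container(ctnr)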
diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py
new file mode 100644
index 0000000..1a5a4e5
--- /dev/null
+++ b/tests/integration/api_exec_test.py
@@ -0,0 +1,205 @@
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
+
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import (
+ requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
+)
+
+
+class ExecTest(BaseAPIIntegrationTest):
+ def test_execute_command(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, ['echo', 'hello'])
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'hello\n'
+
+ def test_exec_command_string(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'echo hello world')
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'hello world\n'
+
+ def test_exec_command_as_user(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'whoami', user='default')
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'default\n'
+
+ def test_exec_command_as_root(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'whoami')
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'root\n'
+
+ def test_exec_command_streaming(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+
+ exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
+ assert 'Id' in exec_id
+
+ res = b''
+ for chunk in self.client.exec_start(exec_id, stream=True):
+ res += chunk
+ assert res == b'hello\nworld\n'
+
+ def test_exec_start_socket(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ container_id = container['Id']
+ self.client.start(container_id)
+ self.tmp_containers.append(container_id)
+
+ line = 'yay, interactive exec!'
+ # `echo` appends CRLF, `printf` doesn't
+ exec_id = self.client.exec_create(
+ container_id, ['printf', line], tty=True)
+ assert 'Id' in exec_id
+
+ socket = self.client.exec_start(exec_id, socket=True)
+ self.addCleanup(socket.close)
+
+ next_size = next_frame_size(socket)
+ assert next_size == len(line)
+ data = read_exactly(socket, next_size)
+ assert data.decode('utf-8') == line
+
+ def test_exec_start_detached(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ container_id = container['Id']
+ self.client.start(container_id)
+ self.tmp_containers.append(container_id)
+
+ exec_id = self.client.exec_create(
+ container_id, ['printf', "asdqwe"])
+ assert 'Id' in exec_id
+
+ response = self.client.exec_start(exec_id, detach=True)
+
+ assert response == ""
+
+ def test_exec_inspect(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
+ assert 'Id' in exec_id
+ self.client.exec_start(exec_id)
+ exec_info = self.client.exec_inspect(exec_id)
+ assert 'ExitCode' in exec_info
+ assert exec_info['ExitCode'] != 0
+
+ @requires_api_version('1.25')
+ def test_exec_command_with_env(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'env', environment=["X=Y"])
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert b'X=Y\n' in exec_log
+
+ @requires_api_version('1.35')
+ def test_exec_command_with_workdir(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ res = self.client.exec_create(container, 'pwd', workdir='/var/www')
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'/var/www\n'
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+ def test_detach_with_arg(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat',
+ stdin=True, tty=True, detach_keys='ctrl-x', stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
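The exec API used throughout this file is a two-step handshake: exec_create() returns a dict carrying the exec instance Id, which exec_start() and exec_inspect() then accept directly. A minimal sketch, assuming a reachable local daemon:

    import docker

    client = docker.APIClient()
    ctnr = client.create_container('busybox', 'cat',
                                   detach=True, stdin_open=True)
    client.start(ctnr)
    exec_id = client.exec_create(ctnr, ['echo', 'hello'])  # -> {'Id': '...'}
    assert client.exec_start(exec_id) == b'hello\n'
    assert client.exec_inspect(exec_id)['ExitCode'] == 0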
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
new file mode 100644
index 0000000..5dbac37
--- /dev/null
+++ b/tests/integration/api_healthcheck_test.py
@@ -0,0 +1,68 @@
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from .. import helpers
+
+SECOND = 1000000000
+
+
+def wait_on_health_status(client, container, status):
+ def condition():
+ res = client.inspect_container(container)
+ return res['State']['Health']['Status'] == status
+ return helpers.wait_on_condition(condition)
+
+
+class HealthcheckTest(BaseAPIIntegrationTest):
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_shell_command(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
+ self.tmp_containers.append(container)
+
+ res = self.client.inspect_container(container)
+ assert res['Config']['Healthcheck']['Test'] == [
+ 'CMD-SHELL', 'echo "hello world"'
+ ]
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_passes(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="true",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_fails(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="false",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "unhealthy")
+
+ @helpers.requires_api_version('1.29')
+ def test_healthcheck_start_period(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="echo 'x' >> /counter.txt && "
+ "test `cat /counter.txt | wc -l` -ge 3",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ start_period=3 * SECOND
+ )
+ )
+
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
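The healthcheck durations above are expressed in nanoseconds, which is what the SECOND = 1000000000 constant encodes; a plain string test is normalized to the ['CMD-SHELL', ...] form asserted in test_healthcheck_shell_command. A minimal sketch outside the test harness, assuming a reachable local daemon:

    import docker

    SECOND = 1000000000              # healthcheck fields are in nanoseconds
    client = docker.APIClient()
    ctnr = client.create_container(
        'busybox', 'top',
        healthcheck=dict(
            test='true',             # becomes ['CMD-SHELL', 'true']
            interval=2 * SECOND,
            timeout=1 * SECOND,
            retries=1,
        )
    )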
diff --git a/tests/integration/image_test.py b/tests/integration/api_image_test.py
index 24800f2..050e7f3 100644
--- a/tests/integration/image_test.py
+++ b/tests/integration/api_image_test.py
@@ -14,79 +14,84 @@ from six.moves import socketserver
import docker
-from .. import helpers
+from ..helpers import requires_api_version, requires_experimental
+from .base import BaseAPIIntegrationTest, BUSYBOX
-BUSYBOX = helpers.BUSYBOX
-
-class ListImagesTest(helpers.BaseTestCase):
+class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
- self.assertIn('Id', res1[0])
+ assert 'Id' in res1[0]
res10 = res1[0]
- self.assertIn('Created', res10)
- self.assertIn('RepoTags', res10)
+ assert 'Created' in res10
+ assert 'RepoTags' in res10
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
- self.assertEqual(len(distinct), self.client.info()['Images'])
+ assert len(distinct) == self.client.info()['Images']
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
- self.assertEqual(type(res1[0]), six.text_type)
+ assert type(res1[0]) == six.text_type
-class PullImageTest(helpers.BaseTestCase):
+class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
- res = self.client.pull('hello-world')
+ res = self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
- self.assertEqual(type(res), six.text_type)
- self.assertGreaterEqual(
- len(self.client.images('hello-world')), 1
- )
+ assert type(res) == six.text_type
+ assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
- self.assertIn('Id', img_info)
+ assert 'Id' in img_info
def test_pull_streaming(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
- stream = self.client.pull('hello-world', stream=True, decode=True)
+ stream = self.client.pull(
+ 'hello-world', tag='latest', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
- self.assertGreaterEqual(
- len(self.client.images('hello-world')), 1
- )
+ assert len(self.client.images('hello-world')) >= 1
img_info = self.client.inspect_image('hello-world')
- self.assertIn('Id', img_info)
+ assert 'Id' in img_info
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_pull_invalid_platform(self):
+ with pytest.raises(docker.errors.APIError) as excinfo:
+ self.client.pull('hello-world', platform='foobar')
-class CommitTest(helpers.BaseTestCase):
+ assert excinfo.value.status_code == 500
+ assert 'invalid platform' in excinfo.exconly()
+
+
+class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
- self.assertIn('Id', res)
+ assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
- self.assertIn('Container', img)
- self.assertTrue(img['Container'].startswith(id))
- self.assertIn('ContainerConfig', img)
- self.assertIn('Image', img['ContainerConfig'])
- self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
+ assert 'Container' in img
+ assert img['Container'].startswith(id)
+ assert 'ContainerConfig' in img
+ assert 'Image' in img['ContainerConfig']
+ assert BUSYBOX == img['ContainerConfig']['Image']
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
- self.assertIn('Parent', img)
- self.assertEqual(img['Parent'], busybox_id)
+ assert 'Parent' in img
+ assert img['Parent'] == busybox_id
def test_commit_with_changes(self):
cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
@@ -103,23 +108,24 @@ class CommitTest(helpers.BaseTestCase):
assert img['Config']['Cmd'] == ['bash']
-class RemoveImageTest(helpers.BaseTestCase):
+class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
- self.assertIn('Id', res)
+ assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
- self.client.remove_image(img_id, force=True)
+ logs = self.client.remove_image(img_id, force=True)
+ assert {"Deleted": img_id} in logs
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
- self.assertEqual(len(res), 0)
+ assert len(res) == 0
-class ImportImageTest(helpers.BaseTestCase):
+class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
@@ -170,7 +176,7 @@ class ImportImageTest(helpers.BaseTestCase):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -185,9 +191,9 @@ class ImportImageTest(helpers.BaseTestCase):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
- self.assertIn('status', result)
+ assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -200,9 +206,9 @@ class ImportImageTest(helpers.BaseTestCase):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
- self.assertIn('status', result)
+ assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
@@ -248,6 +254,19 @@ class ImportImageTest(helpers.BaseTestCase):
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
+ # Docs say output is available since API 1.23, but this test fails on engine 1.12.0
+ @requires_api_version('1.24')
+ def test_get_load_image(self):
+ test_img = 'hello-world:latest'
+ self.client.pull(test_img)
+ data = self.client.get_image(test_img)
+ assert data
+ output = self.client.load_image(data)
+ assert any([
+ line for line in output
+ if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
+ ])
+
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
@@ -282,8 +301,68 @@ class ImportImageTest(helpers.BaseTestCase):
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
- self.assertNotIn('error', result)
+ assert 'error' not in result
- self.assertIn('status', result)
+ assert 'status' in result
img_id = result['status']
self.tmp_imgs.append(img_id)
+
+
+@requires_api_version('1.25')
+class PruneImagesTest(BaseAPIIntegrationTest):
+ def test_prune_images(self):
+ try:
+ self.client.remove_image('hello-world')
+ except docker.errors.APIError:
+ pass
+
+ # Ensure busybox does not get pruned
+ ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.tmp_containers.append(ctnr)
+
+ self.client.pull('hello-world', tag='latest')
+ self.tmp_imgs.append('hello-world')
+ img_id = self.client.inspect_image('hello-world')['Id']
+ result = self.client.prune_images()
+ assert img_id not in [
+ img.get('Deleted') for img in result.get('ImagesDeleted') or []
+ ]
+ result = self.client.prune_images({'dangling': False})
+ assert result['SpaceReclaimed'] > 0
+ assert 'hello-world:latest' in [
+ img.get('Untagged') for img in result['ImagesDeleted']
+ ]
+ assert img_id in [
+ img.get('Deleted') for img in result['ImagesDeleted']
+ ]
+
+
+class SaveLoadImagesTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.23')
+ def test_get_image_load_image(self):
+ with tempfile.TemporaryFile() as f:
+ stream = self.client.get_image(BUSYBOX)
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ result = self.client.load_image(f.read())
+
+ success = False
+ result_line = 'Loaded image: {}\n'.format(BUSYBOX)
+ for data in result:
+ print(data)
+ if 'stream' in data:
+ if data['stream'] == result_line:
+ success = True
+ break
+ assert success is True
+
+
+@requires_api_version('1.30')
+class InspectDistributionTest(BaseAPIIntegrationTest):
+ def test_inspect_distribution(self):
+ data = self.client.inspect_distribution('busybox:latest')
+ assert data is not None
+ assert 'Platforms' in data
+ assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
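get_image() streams an image tarball in chunks and load_image() feeds one back while yielding JSON progress objects, which is what SaveLoadImagesTest exercises above. A minimal round-trip sketch, assuming busybox:latest is present locally:

    import tempfile

    import docker

    client = docker.APIClient()
    with tempfile.TemporaryFile() as f:
        for chunk in client.get_image('busybox:latest'):  # tar archive chunks
            f.write(chunk)
        f.seek(0)
        for line in client.load_image(f.read()):          # progress dicts
            if 'Loaded image' in line.get('stream', ''):
                break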
diff --git a/tests/integration/network_test.py b/tests/integration/api_network_test.py
index 5f852ab..b6726d0 100644
--- a/tests/integration/network_test.py
+++ b/tests/integration/api_network_test.py
@@ -1,56 +1,52 @@
-import random
-
import docker
-from docker.utils import create_ipam_config
-from docker.utils import create_ipam_pool
+from docker.types import IPAMConfig, IPAMPool
import pytest
-from .. import helpers
-from ..base import requires_api_version
+from ..helpers import random_name, requires_api_version
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+class TestNetworks(BaseAPIIntegrationTest):
+ def tearDown(self):
+ super(TestNetworks, self).tearDown()
+ self.client.leave_swarm(force=True)
-class TestNetworks(helpers.BaseTestCase):
def create_network(self, *args, **kwargs):
- net_name = u'dockerpy{}'.format(random.getrandbits(24))[:14]
+ net_name = random_name()
net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
self.tmp_networks.append(net_id)
return (net_name, net_id)
- @requires_api_version('1.21')
def test_list_networks(self):
networks = self.client.networks()
- initial_size = len(networks)
net_name, net_id = self.create_network()
networks = self.client.networks()
- self.assertEqual(len(networks), initial_size + 1)
- self.assertTrue(net_id in [n['Id'] for n in networks])
+ assert net_id in [n['Id'] for n in networks]
networks_by_name = self.client.networks(names=[net_name])
- self.assertEqual([n['Id'] for n in networks_by_name], [net_id])
+ assert [n['Id'] for n in networks_by_name] == [net_id]
networks_by_partial_id = self.client.networks(ids=[net_id[:8]])
- self.assertEqual([n['Id'] for n in networks_by_partial_id], [net_id])
+ assert [n['Id'] for n in networks_by_partial_id] == [net_id]
- @requires_api_version('1.21')
def test_inspect_network(self):
net_name, net_id = self.create_network()
net = self.client.inspect_network(net_id)
- self.assertEqual(net['Id'], net_id)
- self.assertEqual(net['Name'], net_name)
- self.assertEqual(net['Driver'], 'bridge')
- self.assertEqual(net['Scope'], 'local')
- self.assertEqual(net['IPAM']['Driver'], 'default')
+ assert net['Id'] == net_id
+ assert net['Name'] == net_name
+ assert net['Driver'] == 'bridge'
+ assert net['Scope'] == 'local'
+ assert net['IPAM']['Driver'] == 'default'
- @requires_api_version('1.21')
def test_create_network_with_ipam_config(self):
_, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="172.28.0.0/16",
iprange="172.28.5.0/24",
gateway="172.28.5.254",
@@ -82,47 +78,39 @@ class TestNetworks(helpers.BaseTestCase):
},
}]
- @requires_api_version('1.21')
def test_create_network_with_host_driver_fails(self):
- net_name = 'dockerpy{}'.format(random.getrandbits(24))[:14]
-
with pytest.raises(docker.errors.APIError):
- self.client.create_network(net_name, driver='host')
+ self.client.create_network(random_name(), driver='host')
- @requires_api_version('1.21')
def test_remove_network(self):
- initial_size = len(self.client.networks())
-
net_name, net_id = self.create_network()
- self.assertEqual(len(self.client.networks()), initial_size + 1)
+ assert net_name in [n['Name'] for n in self.client.networks()]
self.client.remove_network(net_id)
- self.assertEqual(len(self.client.networks()), initial_size)
+ assert net_name not in [n['Name'] for n in self.client.networks()]
- @requires_api_version('1.21')
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertEqual(
- list(network_data['Containers'].keys()),
- [container['Id']]
- )
+ assert list(network_data['Containers'].keys()) == [
+ container['Id']
+ ]
with pytest.raises(docker.errors.APIError):
self.client.connect_container_to_network(container, net_id)
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
with pytest.raises(docker.errors.APIError):
self.client.disconnect_container_from_network(container, net_id)
@@ -131,23 +119,21 @@ class TestNetworks(helpers.BaseTestCase):
def test_connect_and_force_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertEqual(
- list(network_data['Containers'].keys()),
+ assert list(network_data['Containers'].keys()) == \
[container['Id']]
- )
self.client.disconnect_container_from_network(container, net_id, True)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
with pytest.raises(docker.errors.APIError):
self.client.disconnect_container_from_network(
@@ -158,7 +144,7 @@ class TestNetworks(helpers.BaseTestCase):
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -171,12 +157,11 @@ class TestNetworks(helpers.BaseTestCase):
assert 'foo' in aliases
assert 'bar' in aliases
- @requires_api_version('1.21')
def test_connect_on_container_create(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image='busybox',
+ image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
@@ -184,20 +169,19 @@ class TestNetworks(helpers.BaseTestCase):
self.client.start(container)
network_data = self.client.inspect_network(net_id)
- self.assertEqual(
- list(network_data['Containers'].keys()),
- [container['Id']])
+ assert list(network_data['Containers'].keys()) == \
+ [container['Id']]
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
- self.assertFalse(network_data.get('Containers'))
+ assert not network_data.get('Containers')
@requires_api_version('1.22')
def test_create_with_aliases(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image='busybox',
+ image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(
network_mode=net_name,
@@ -221,13 +205,13 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.22')
def test_create_with_ipv4_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
- pool_configs=[create_ipam_pool(subnet="132.124.0.0/16")],
+ pool_configs=[IPAMPool(subnet="132.124.0.0/16")],
),
)
container = self.client.create_container(
- image='busybox', command='top',
+ image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -238,25 +222,22 @@ class TestNetworks(helpers.BaseTestCase):
self.tmp_containers.append(container)
self.client.start(container)
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data[
- 'NetworkSettings']['Networks'][net_name]['IPAMConfig'][
- 'IPv4Address'
- ],
- '132.124.0.23'
- )
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['IPAMConfig']['IPv4Address']\
+ == '132.124.0.23'
@requires_api_version('1.22')
def test_create_with_ipv6_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
- pool_configs=[create_ipam_pool(subnet="2001:389::1/64")],
+ pool_configs=[IPAMPool(subnet="2001:389::1/64")],
),
)
container = self.client.create_container(
- image='busybox', command='top',
+ image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -267,19 +248,16 @@ class TestNetworks(helpers.BaseTestCase):
self.tmp_containers.append(container)
self.client.start(container)
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data[
- 'NetworkSettings']['Networks'][net_name]['IPAMConfig'][
- 'IPv6Address'
- ],
- '2001:389::f00d'
- )
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['IPAMConfig']['IPv6Address']\
+ == '2001:389::f00d'
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
container = self.client.create_container(
- 'busybox', 'top',
+ BUSYBOX, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
@@ -310,10 +288,12 @@ class TestNetworks(helpers.BaseTestCase):
}),
)
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data['NetworkSettings']['Networks'][net_name]['Links'],
- ['docker-py-test-upstream:bar'])
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['Links'] == [
+ 'docker-py-test-upstream:bar'
+ ]
self.create_and_start(
name='docker-py-test-upstream',
@@ -322,10 +302,9 @@ class TestNetworks(helpers.BaseTestCase):
self.execute(container, ['nslookup', 'bar'])
- @requires_api_version('1.21')
def test_create_check_duplicate(self):
net_name, net_id = self.create_network()
- with self.assertRaises(docker.errors.APIError):
+ with pytest.raises(docker.errors.APIError):
self.client.create_network(net_name, check_duplicate=True)
net_id = self.client.create_network(net_name, check_duplicate=False)
self.tmp_networks.append(net_id['Id'])
@@ -342,10 +321,12 @@ class TestNetworks(helpers.BaseTestCase):
container, net_name,
links=[('docker-py-test-upstream', 'bar')])
- container_data = self.client.inspect_container(container)
- self.assertEqual(
- container_data['NetworkSettings']['Networks'][net_name]['Links'],
- ['docker-py-test-upstream:bar'])
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['Links'] == [
+ 'docker-py-test-upstream:bar'
+ ]
self.create_and_start(
name='docker-py-test-upstream',
@@ -357,10 +338,10 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.22')
def test_connect_with_ipv4_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="172.28.0.0/16", iprange="172.28.5.0/24",
gateway="172.28.5.254"
)
@@ -378,17 +359,15 @@ class TestNetworks(helpers.BaseTestCase):
container_data = self.client.inspect_container(container)
net_data = container_data['NetworkSettings']['Networks'][net_name]
- self.assertEqual(
- net_data['IPAMConfig']['IPv4Address'], '172.28.5.24'
- )
+ assert net_data['IPAMConfig']['IPv4Address'] == '172.28.5.24'
@requires_api_version('1.22')
def test_connect_with_ipv6_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="2001:389::1/64", iprange="2001:389::0/96",
gateway="2001:389::ffff"
)
@@ -406,9 +385,7 @@ class TestNetworks(helpers.BaseTestCase):
container_data = self.client.inspect_container(container)
net_data = container_data['NetworkSettings']['Networks'][net_name]
- self.assertEqual(
- net_data['IPAMConfig']['IPv6Address'], '2001:389::f00d'
- )
+ assert net_data['IPAMConfig']['IPv6Address'] == '2001:389::f00d'
@requires_api_version('1.23')
def test_create_internal_networks(self):
@@ -436,6 +413,62 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.23')
def test_create_network_ipv6_enabled(self):
- _, net_id = self.create_network(enable_ipv6=True)
+ _, net_id = self.create_network(
+ enable_ipv6=True, ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[
+ IPAMPool(
+ subnet="2001:389::1/64", iprange="2001:389::0/96",
+ gateway="2001:389::ffff"
+ )
+ ]
+ )
+ )
net = self.client.inspect_network(net_id)
assert net['EnableIPv6'] is True
+
+ @requires_api_version('1.25')
+ def test_create_network_attachable(self):
+ assert self.init_swarm()
+ _, net_id = self.create_network(driver='overlay', attachable=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Attachable'] is True
+
+ @requires_api_version('1.29')
+ def test_create_network_ingress(self):
+ assert self.init_swarm()
+ self.client.remove_network('ingress')
+ _, net_id = self.create_network(driver='overlay', ingress=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Ingress'] is True
+
+ @requires_api_version('1.25')
+ def test_prune_networks(self):
+ net_name, _ = self.create_network()
+ result = self.client.prune_networks()
+ assert net_name in result['NetworksDeleted']
+
+ @requires_api_version('1.31')
+ def test_create_inspect_network_with_scope(self):
+ assert self.init_swarm()
+ net_name_loc, net_id_loc = self.create_network(scope='local')
+
+ assert self.client.inspect_network(net_name_loc)
+ assert self.client.inspect_network(net_name_loc, scope='local')
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_network(net_name_loc, scope='global')
+
+ net_name_swarm, net_id_swarm = self.create_network(
+ driver='overlay', scope='swarm'
+ )
+
+ assert self.client.inspect_network(net_name_swarm)
+ assert self.client.inspect_network(net_name_swarm, scope='swarm')
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_network(net_name_swarm, scope='local')
+
+ def test_create_remove_network_with_space_in_name(self):
+ net_id = self.client.create_network('test 01')
+ self.tmp_networks.append(net_id)
+ assert self.client.inspect_network('test 01')
+ assert self.client.remove_network('test 01') is None # does not raise
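The IPAM plumbing in this file now goes through docker.types.IPAMConfig and IPAMPool, which replace the removed create_ipam_config/create_ipam_pool helpers. A minimal sketch, assuming a reachable local daemon ('example-net' is a hypothetical name):

    import docker
    from docker.types import IPAMConfig, IPAMPool

    client = docker.APIClient()
    net = client.create_network(
        'example-net',
        ipam=IPAMConfig(
            driver='default',
            pool_configs=[IPAMPool(subnet='172.28.0.0/16',
                                   gateway='172.28.0.254')],
        ),
    )
    client.remove_network(net['Id'])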
diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py
new file mode 100644
index 0000000..433d44d
--- /dev/null
+++ b/tests/integration/api_plugin_test.py
@@ -0,0 +1,145 @@
+import os
+
+import docker
+import pytest
+
+from .base import BaseAPIIntegrationTest, TEST_API_VERSION
+from ..helpers import requires_api_version
+
+SSHFS = 'vieux/sshfs:latest'
+
+
+@requires_api_version('1.25')
+class PluginTest(BaseAPIIntegrationTest):
+ @classmethod
+ def teardown_class(cls):
+ c = docker.APIClient(
+ version=TEST_API_VERSION, timeout=60,
+ **docker.utils.kwargs_from_env()
+ )
+ try:
+ c.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def teardown_method(self, method):
+ try:
+ self.client.disable_plugin(SSHFS)
+ except docker.errors.APIError:
+ pass
+
+ for p in self.tmp_plugins:
+ try:
+ self.client.remove_plugin(p, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def ensure_plugin_installed(self, plugin_name):
+ try:
+ return self.client.inspect_plugin(plugin_name)
+ except docker.errors.NotFound:
+ prv = self.client.plugin_privileges(plugin_name)
+ for d in self.client.pull_plugin(plugin_name, prv):
+ pass
+ return self.client.inspect_plugin(plugin_name)
+
+ def test_enable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ with pytest.raises(docker.errors.APIError):
+ self.client.enable_plugin(SSHFS)
+
+ def test_disable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ self.client.disable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is False
+ with pytest.raises(docker.errors.APIError):
+ self.client.disable_plugin(SSHFS)
+
+ def test_inspect_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.inspect_plugin(SSHFS)
+ assert 'Config' in data
+ assert 'Name' in data
+ assert data['Name'] == SSHFS
+
+ def test_plugin_privileges(self):
+ prv = self.client.plugin_privileges(SSHFS)
+ assert isinstance(prv, list)
+ for item in prv:
+ assert 'Name' in item
+ assert 'Value' in item
+ assert 'Description' in item
+
+ def test_list_plugins(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.plugins()
+ assert len(data) > 0
+ plugin = [p for p in data if p['Name'] == SSHFS][0]
+ assert 'Config' in plugin
+
+ def test_configure_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ self.client.configure_plugin(SSHFS, {
+ 'DEBUG': '1'
+ })
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'Env' in pl_data['Settings']
+ assert 'DEBUG=1' in pl_data['Settings']['Env']
+
+ self.client.configure_plugin(SSHFS, ['DEBUG=0'])
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'DEBUG=0' in pl_data['Settings']['Env']
+
+ def test_remove_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.remove_plugin(SSHFS) is True
+
+ def test_force_remove_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ self.client.enable_plugin(SSHFS)
+ assert self.client.inspect_plugin(SSHFS)['Enabled'] is True
+ assert self.client.remove_plugin(SSHFS, force=True) is True
+
+ def test_install_plugin(self):
+ try:
+ self.client.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.pull_plugin(SSHFS, prv)]
+ assert any(x.get('status') == 'Download complete' for x in logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ @requires_api_version('1.26')
+ def test_upgrade_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.upgrade_plugin(SSHFS, SSHFS, prv)]
+ assert any(x.get('status') == 'Download complete' for x in logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ def test_create_plugin(self):
+ plugin_data_dir = os.path.join(
+ os.path.dirname(__file__), 'testdata/dummy-plugin'
+ )
+ assert self.client.create_plugin(
+ 'docker-sdk-py/dummy', plugin_data_dir
+ )
+ self.tmp_plugins.append('docker-sdk-py/dummy')
+ data = self.client.inspect_plugin('docker-sdk-py/dummy')
+ assert data['Config']['Entrypoint'] == ['/dummy']
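Plugin installation mirrors ensure_plugin_installed() above: fetch the privilege list first, then drain the pull_plugin() progress stream before inspecting or enabling. A minimal sketch, assuming the daemon can reach the registry hosting vieux/sshfs:

    import docker

    client = docker.APIClient()
    name = 'vieux/sshfs:latest'
    privileges = client.plugin_privileges(name)
    for status in client.pull_plugin(name, privileges):   # progress dicts
        pass                                              # drain the stream
    assert client.inspect_plugin(name)['Name'] == name
    client.enable_plugin(name)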
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
new file mode 100644
index 0000000..b3d93b8
--- /dev/null
+++ b/tests/integration/api_secret_test.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.25')
+class SecretAPITest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def test_create_secret(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_secret_unicode_data(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == secret_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ assert self.client.remove_secret(secret_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_secret(secret_id)
+
+ def test_list_secrets(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ data = self.client.secrets(filters={'names': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == secret_id['ID']
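Secrets are swarm-scoped, which is why the class joins a swarm in setup_class and leaves it again afterwards. create_secret() returns a dict with the new ID, and the other secret calls accept that dict directly. A minimal sketch, assuming the client node is a swarm manager:

    import docker

    client = docker.APIClient()
    secret = client.create_secret('example_secret', 'hunter2')  # -> {'ID': ...}
    assert client.inspect_secret(secret)['Spec']['Name'] == 'example_secret'
    client.remove_secret(secret)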
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
new file mode 100644
index 0000000..85f9dcc
--- /dev/null
+++ b/tests/integration/api_service_test.py
@@ -0,0 +1,1255 @@
+# -*- coding: utf-8 -*-
+
+import random
+import time
+
+import docker
+import pytest
+import six
+
+from ..helpers import (
+ force_leave_swarm, requires_api_version, requires_experimental
+)
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+
+class ServiceTest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def tearDown(self):
+ for service in self.client.services(filters={'name': 'dockerpytest_'}):
+ try:
+ self.client.remove_service(service['ID'])
+ except docker.errors.APIError:
+ pass
+ super(ServiceTest, self).tearDown()
+
+ def get_service_name(self):
+ return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+ def get_service_container(self, service_name, attempts=20, interval=0.5,
+ include_stopped=False):
+ # There is some delay between the service's creation and the creation
+ # of the service's containers. This method deals with the uncertainty
+ # when trying to retrieve the container associated with a service.
+ while True:
+ containers = self.client.containers(
+ filters={'name': [service_name]}, quiet=True,
+ all=include_stopped
+ )
+ if len(containers) > 0:
+ return containers[0]
+ attempts -= 1
+ if attempts <= 0:
+ return None
+ time.sleep(interval)
+
+ def create_simple_service(self, name=None, labels=None):
+ if name:
+ name = 'dockerpytest_{0}'.format(name)
+ else:
+ name = self.get_service_name()
+
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ return name, self.client.create_service(
+ task_tmpl, name=name, labels=labels
+ )
+
+ @requires_api_version('1.24')
+ def test_list_services(self):
+ services = self.client.services()
+ assert isinstance(services, list)
+
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+ self.create_simple_service()
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 1
+ assert 'dockerpytest_' in test_services[0]['Spec']['Name']
+
+ @requires_api_version('1.24')
+ def test_list_services_filter_by_label(self):
+ test_services = self.client.services(filters={'label': 'test_label'})
+ assert len(test_services) == 0
+ self.create_simple_service(labels={'test_label': 'testing'})
+ test_services = self.client.services(filters={'label': 'test_label'})
+ assert len(test_services) == 1
+ assert test_services[0]['Spec']['Labels']['test_label'] == 'testing'
+
+ def test_inspect_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ def test_inspect_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_name)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ @requires_api_version('1.29')
+ def test_inspect_service_insert_defaults(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ svc_info_defaults = self.client.inspect_service(
+ svc_id, insert_defaults=True
+ )
+ assert svc_info != svc_info_defaults
+ assert 'RollbackConfig' in svc_info_defaults['Spec']
+ assert 'RollbackConfig' not in svc_info['Spec']
+
+ def test_remove_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_id)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_remove_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_name)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_create_service_simple(self):
+ name, svc_id = self.create_simple_service()
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+
+ @requires_api_version('1.25')
+ @requires_experimental(until='1.29')
+ def test_service_logs(self):
+ name, svc_id = self.create_simple_service()
+ assert self.get_service_container(name, include_stopped=True)
+ attempts = 20
+ while True:
+ if attempts == 0:
+ self.fail('No service logs produced by endpoint')
+ return
+ logs = self.client.service_logs(svc_id, stdout=True, is_tty=False)
+ try:
+ log_line = next(logs)
+ except StopIteration:
+ attempts -= 1
+ time.sleep(0.1)
+ continue
+ else:
+ break
+
+ if six.PY3:
+ log_line = log_line.decode('utf-8')
+ assert 'hello\n' in log_line
+
+ def test_create_service_custom_log_driver(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ log_cfg = docker.types.DriverConfig('none')
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, log_driver=log_cfg
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'LogDriver' in res_template
+ assert 'Name' in res_template['LogDriver']
+ assert res_template['LogDriver']['Name'] == 'none'
+
+ def test_create_service_with_volume_mount(self):
+ vol_name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['ls'],
+ mounts=[
+ docker.types.Mount(target='/test', source=vol_name)
+ ]
+ )
+ self.tmp_volumes.append(vol_name)
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Mounts' in cspec
+ assert len(cspec['Mounts']) == 1
+ mount = cspec['Mounts'][0]
+ assert mount['Target'] == '/test'
+ assert mount['Source'] == vol_name
+ assert mount['Type'] == 'volume'
+
+ def test_create_service_with_resources_constraints(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ resources = docker.types.Resources(
+ cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
+ cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ assert res_template['Resources']['Limits'] == resources['Limits']
+ assert res_template['Resources']['Reservations'] == resources[
+ 'Reservations'
+ ]
+
+ def _create_service_with_generic_resources(self, generic_resources):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+
+ resources = docker.types.Resources(
+ generic_resources=generic_resources
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ return resources, self.client.inspect_service(svc_id)
+
+ @requires_api_version('1.32')
+ def test_create_service_with_generic_resources(self):
+ successful = [{
+ 'input': [
+ {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 1}},
+ {'NamedResourceSpec': {'Kind': 'gpu', 'Value': 'test'}}
+ ]}, {
+ 'input': {'gpu': 2, 'mpi': 'latest'},
+ 'expected': [
+ {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 2}},
+ {'NamedResourceSpec': {'Kind': 'mpi', 'Value': 'latest'}}
+ ]}
+ ]
+
+ for test in successful:
+ t = test['input']
+ resrcs, svc_info = self._create_service_with_generic_resources(t)
+
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ res_reservations = res_template['Resources']['Reservations']
+ assert res_reservations == resrcs['Reservations']
+ assert 'GenericResources' in res_reservations
+
+ def _key(d, specs=('DiscreteResourceSpec', 'NamedResourceSpec')):
+ return [d.get(s, {}).get('Kind', '') for s in specs]
+
+ actual = res_reservations['GenericResources']
+ expected = test.get('expected', test['input'])
+ assert sorted(actual, key=_key) == sorted(expected, key=_key)
+
+ @requires_api_version('1.32')
+ def test_create_service_with_invalid_generic_resources(self):
+ for test_input in ['1', 1.0, lambda: '1', {1, 2}]:
+ with pytest.raises(docker.errors.InvalidArgument):
+ self._create_service_with_generic_resources(test_input)
+
+ def test_create_service_with_update_config(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
+ @requires_api_version('1.25')
+ def test_create_service_with_update_config_monitor(self):
+ container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ monitor=300000000, max_failure_ratio=0.4
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Monitor'] == uc['Monitor']
+ assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
+
+ def test_create_service_with_restart_policy(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ policy = docker.types.RestartPolicy(
+ docker.types.RestartPolicy.condition_types.ANY,
+ delay=5, max_attempts=5
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, restart_policy=policy
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
+ assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
+
+ def test_create_service_with_custom_networks(self):
+ net1 = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[
+ 'dockerpytest_1', {'Target': 'dockerpytest_2'}
+ ]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert svc_info['Spec']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ def test_create_service_with_placement(self):
+ node_id = self.client.nodes()[0]['ID']
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=['node.id=={}'.format(node_id)]
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
+ {'Constraints': ['node.id=={}'.format(node_id)]})
+
+ def test_create_service_with_placement_object(self):
+ node_id = self.client.nodes()[0]['ID']
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(
+ constraints=['node.id=={}'.format(node_id)]
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.30')
+ def test_create_service_with_placement_platform(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.27')
+ def test_create_service_with_placement_preferences(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(preferences=[
+ {'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
+ ])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ def test_create_service_with_endpoint_spec(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, 'udp'),
+ 12562: (678,),
+ 53243: 8080,
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ @requires_api_version('1.32')
+ def test_create_service_with_endpoint_spec_host_publish_mode(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, None, 'host'),
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ assert len(ports) == 1
+ port = ports[0]
+ assert port['PublishedPort'] == 12357
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'tcp'
+ assert port['PublishMode'] == 'host'
+
+ def test_create_service_with_env(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Env' in con_spec
+ assert con_spec['Env'] == ['DOCKER_PY_TEST=1']
+
+ @requires_api_version('1.29')
+ def test_create_service_with_update_order(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, order='start-first'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['Order'] == uc['Order']
+
+ @requires_api_version('1.25')
+ def test_create_service_with_tty(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['true'], tty=True
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'TTY' in con_spec
+ assert con_spec['TTY'] is True
+
+ @requires_api_version('1.25')
+ def test_create_service_with_tty_dict(self):
+ container_spec = {
+ 'Image': BUSYBOX,
+ 'Command': ['true'],
+ 'TTY': True
+ }
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'TTY' in con_spec
+ assert con_spec['TTY'] is True
+
+ def test_create_service_global_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, mode='global'
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Global' in svc_info['Spec']['Mode']
+
+ def test_create_service_replicated_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name,
+ mode=docker.types.ServiceMode('replicated', 5)
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5}
+
+ @requires_api_version('1.25')
+ def test_update_service_force_update(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self.client.update_service(name, version_index, task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10
+
+ @requires_api_version('1.25')
+ def test_create_service_with_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = b'phantasmagoria of flower view'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ assert self.client.exec_start(exec_id) == secret_data
+
+ @requires_api_version('1.25')
+ def test_create_service_with_unicode_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = u'東方花映塚'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ container_secret = self.client.exec_start(exec_id)
+ container_secret = container_secret.decode('utf-8')
+ assert container_secret == secret_data
+
+ @requires_api_version('1.30')
+ def test_create_service_with_config(self):
+ config_name = 'favorite_touhou'
+ config_data = b'phantasmagoria of flower view'
+ config_id = self.client.create_config(config_name, config_data)
+ self.tmp_configs.append(config_id)
+ config_ref = docker.types.ConfigReference(config_id, config_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], configs=[config_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
+ assert configs[0] == config_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /{0}'.format(config_name)
+ )
+ assert self.client.exec_start(exec_id) == config_data
+
+ @requires_api_version('1.30')
+ def test_create_service_with_unicode_config(self):
+ config_name = 'favorite_touhou'
+ config_data = u'東方花映塚'
+ config_id = self.client.create_config(config_name, config_data)
+ self.tmp_configs.append(config_id)
+ config_ref = docker.types.ConfigReference(config_id, config_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], configs=[config_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
+ assert configs[0] == config_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /{0}'.format(config_name)
+ )
+ container_config = self.client.exec_start(exec_id)
+ container_config = container_config.decode('utf-8')
+ assert container_config == config_data
+
+ @requires_api_version('1.25')
+ def test_create_service_with_hosts(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], hosts={
+ 'foobar': '127.0.0.1',
+ 'baz': '8.8.8.8',
+ }
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Hosts' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ hosts = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hosts']
+ assert len(hosts) == 2
+ assert '127.0.0.1 foobar' in hosts
+ assert '8.8.8.8 baz' in hosts
+
+ @requires_api_version('1.25')
+ def test_create_service_with_hostname(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], hostname='foobar.baz.com'
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Hostname' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hostname'] ==
+ 'foobar.baz.com'
+ )
+
+ @requires_api_version('1.25')
+ def test_create_service_with_groups(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], groups=['shrinemaidens', 'youkais']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Groups' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ groups = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Groups']
+ assert len(groups) == 2
+ assert 'shrinemaidens' in groups
+ assert 'youkais' in groups
+
+ @requires_api_version('1.25')
+ def test_create_service_with_dns_config(self):
+ dns_config = docker.types.DNSConfig(
+ nameservers=['8.8.8.8', '8.8.4.4'],
+ search=['local'], options=['debug']
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], dns_config=dns_config
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'DNSConfig' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ dns_config ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig']
+ )
+
+ @requires_api_version('1.25')
+ def test_create_service_with_healthcheck(self):
+ second = 1000000000
+ hc = docker.types.Healthcheck(
+ test='true', retries=3, timeout=1 * second,
+ start_period=3 * second, interval=int(second / 2),
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck=hc
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ hc ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
+ )
+
+ @requires_api_version('1.28')
+ def test_create_service_with_readonly(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], read_only=True
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'ReadOnly' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert svc_info['Spec']['TaskTemplate']['ContainerSpec']['ReadOnly']
+
+ @requires_api_version('1.28')
+ def test_create_service_with_stop_signal(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], stop_signal='SIGINT'
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'StopSignal' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['StopSignal'] ==
+ 'SIGINT'
+ )
+
+ @requires_api_version('1.30')
+ def test_create_service_with_privileges(self):
+ priv = docker.types.Privileges(selinux_disable=True)
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], privileges=priv
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Privileges' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ privileges = (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Privileges']
+ )
+ assert privileges['SELinuxContext']['Disable'] is True
+
+ @requires_api_version('1.25')
+ def test_update_service_with_defaults_name(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Name' in svc_info['Spec']
+ assert svc_info['Spec']['Name'] == name
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Name' in svc_info['Spec']
+ assert svc_info['Spec']['Name'] == name
+
+ @requires_api_version('1.25')
+ def test_update_service_with_defaults_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, name=name,
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+
+ def test_update_service_with_defaults_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name,
+ mode=docker.types.ServiceMode(mode='replicated', replicas=2)
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
+ assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
+ assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
+
+ def test_update_service_with_defaults_container_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={'container.label': 'SampleLabel'}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, new_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ newer_index = svc_info['Version']['Index']
+ assert newer_index > new_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+
+ def test_update_service_with_defaults_update_config(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
+ def test_update_service_with_defaults_networks(self):
+ net1 = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[
+ 'dockerpytest_1', {'Target': 'dockerpytest_2'}
+ ]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert svc_info['Spec']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ self._update_service(
+ svc_id, name, new_index, networks=[net1['Id']],
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Networks'] == [
+ {'Target': net1['Id']}
+ ]
+
+ def test_update_service_with_defaults_endpoint_spec(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, 'udp'),
+ 12562: (678,),
+ 53243: 8080,
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ print(svc_info)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ @requires_api_version('1.25')
+ def test_update_service_remove_healthcheck(self):
+ second = 1000000000
+ hc = docker.types.Healthcheck(
+ test='true', retries=3, timeout=1 * second,
+ start_period=3 * second, interval=int(second / 2),
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck=hc
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ hc ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
+ )
+
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck={}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ 'Healthcheck' not in container_spec or
+ not container_spec['Healthcheck']
+ )
+
+ def test_update_service_remove_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={}, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert not svc_info['Spec'].get('Labels')
+
+ def test_update_service_remove_container_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={'container.label': 'SampleLabel'}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert not container_spec.get('Labels')
+
+ @requires_api_version('1.29')
+ def test_update_service_with_network_change(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ net1 = self.client.create_network(
+ self.get_service_name(), driver='overlay',
+ ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ self.get_service_name(), driver='overlay',
+ ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[net1['Id']]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert len(svc_info['Spec']['Networks']) > 0
+ assert svc_info['Spec']['Networks'][0]['Target'] == net1['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, name=name,
+ networks=[net2['Id']], fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net2['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+
+ self._update_service(
+ svc_id, name, new_index, name=name, networks=[net1['Id']],
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'ContainerSpec' in task_template
+ new_spec = task_template['ContainerSpec']
+ assert 'Image' in new_spec
+ assert new_spec['Image'].split(':')[0] == 'busybox'
+ assert 'Command' in new_spec
+ assert new_spec['Command'] == ['echo', 'hello']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net1['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, networks=[net2['Id']]
+ )
+ self._update_service(
+ svc_id, name, new_index, task_tmpl, name=name,
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net2['Id']
+
+ def _update_service(self, svc_id, *args, **kwargs):
+ # service update tests seem to be a bit flaky
+ # give them a chance to retry the update with a new version index
+ try:
+ self.client.update_service(*args, **kwargs)
+ except docker.errors.APIError as e:
+ if e.explanation.endswith("update out of sequence"):
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ if len(args) > 1:
+ args = (args[0], version_index) + args[2:]
+ else:
+ kwargs['version'] = version_index
+
+ self.client.update_service(*args, **kwargs)
+ else:
+ raise
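The _update_service helper above encodes the invariant all swarm object updates in this suite rely on: each update must carry the Version.Index last read from inspect_service, and a concurrent change by the engine surfaces as an APIError ending in "update out of sequence", resolved by re-reading the index and retrying. A rough standalone sketch of that pattern (the service name 'web' is hypothetical):

    import docker

    def update_with_retry(client, service, retries=3, **kwargs):
        # Swarm updates use optimistic concurrency: send the version index we
        # last saw; on a sequence conflict, refresh the index and try again.
        for _ in range(retries):
            version = client.inspect_service(service)['Version']['Index']
            try:
                return client.update_service(service, version, **kwargs)
            except docker.errors.APIError as e:
                if not e.explanation.endswith('update out of sequence'):
                    raise
        raise RuntimeError('update kept colliding with concurrent changes')

    client = docker.APIClient(version='auto')
    update_with_retry(client, 'web', labels={'force': 'update'},
                      fetch_current_spec=True)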
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
new file mode 100644
index 0000000..dbf3786
--- /dev/null
+++ b/tests/integration/api_swarm_test.py
@@ -0,0 +1,207 @@
+import copy
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+class SwarmTest(BaseAPIIntegrationTest):
+ def setUp(self):
+ super(SwarmTest, self).setUp()
+ force_leave_swarm(self.client)
+ self._unlock_key = None
+
+ def tearDown(self):
+ super(SwarmTest, self).tearDown()
+ try:
+ if self._unlock_key:
+ self.client.unlock_swarm(self._unlock_key)
+ except docker.errors.APIError:
+ pass
+
+ force_leave_swarm(self.client)
+
+ @requires_api_version('1.24')
+ def test_init_swarm_simple(self):
+ assert self.init_swarm()
+
+ @requires_api_version('1.24')
+ def test_init_swarm_force_new_cluster(self):
+ pytest.skip('Test stalls the engine on 1.12.0')
+
+ assert self.init_swarm()
+ version_1 = self.client.inspect_swarm()['Version']['Index']
+ assert self.client.init_swarm(force_new_cluster=True)
+ version_2 = self.client.inspect_swarm()['Version']['Index']
+ assert version_2 != version_1
+
+ @requires_api_version('1.24')
+ def test_init_already_in_cluster(self):
+ assert self.init_swarm()
+ with pytest.raises(docker.errors.APIError):
+ self.init_swarm()
+
+ @requires_api_version('1.24')
+ def test_init_swarm_custom_raft_spec(self):
+ spec = self.client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
+ assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+
+ @requires_api_version('1.30')
+ def test_init_swarm_with_ca_config(self):
+ spec = self.client.create_swarm_spec(
+ node_cert_expiry=7776000000000000, ca_force_rotate=6000000000000
+ )
+
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ assert swarm_info['Spec']['CAConfig']['NodeCertExpiry'] == (
+ spec['CAConfig']['NodeCertExpiry']
+ )
+ assert swarm_info['Spec']['CAConfig']['ForceRotate'] == (
+ spec['CAConfig']['ForceRotate']
+ )
+
+ @requires_api_version('1.25')
+ def test_init_swarm_with_autolock_managers(self):
+ spec = self.client.create_swarm_spec(autolock_managers=True)
+ assert self.init_swarm(swarm_spec=spec)
+ # save unlock key for tearDown
+ self._unlock_key = self.client.get_unlock_key()
+ swarm_info = self.client.inspect_swarm()
+
+ assert (
+ swarm_info['Spec']['EncryptionConfig']['AutoLockManagers'] is True
+ )
+
+ assert self._unlock_key.get('UnlockKey')
+
+ @requires_api_version('1.25')
+ @pytest.mark.xfail(
+ reason="This doesn't seem to be taken into account by the engine"
+ )
+ def test_init_swarm_with_log_driver(self):
+ spec = {'TaskDefaults': {'LogDriver': {'Name': 'syslog'}}}
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+
+ assert swarm_info['Spec']['TaskDefaults']['LogDriver']['Name'] == (
+ 'syslog'
+ )
+
+ @requires_api_version('1.24')
+ def test_leave_swarm(self):
+ assert self.init_swarm()
+ with pytest.raises(docker.errors.APIError) as exc_info:
+ self.client.leave_swarm()
+ assert exc_info.value.response.status_code == 500
+ assert self.client.leave_swarm(force=True)
+ with pytest.raises(docker.errors.APIError) as exc_info:
+ self.client.inspect_swarm()
+ assert exc_info.value.response.status_code == 406
+ assert self.client.leave_swarm(force=True)
+
+ @requires_api_version('1.24')
+ def test_update_swarm(self):
+ assert self.init_swarm()
+ swarm_info_1 = self.client.inspect_swarm()
+ spec = self.client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200,
+ node_cert_expiry=7776000000000000
+ )
+ assert self.client.update_swarm(
+ version=swarm_info_1['Version']['Index'],
+ swarm_spec=spec, rotate_worker_token=True
+ )
+ swarm_info_2 = self.client.inspect_swarm()
+
+ assert (
+ swarm_info_1['Version']['Index'] !=
+ swarm_info_2['Version']['Index']
+ )
+ assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
+ assert (
+ swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+ )
+ assert (
+ swarm_info_1['JoinTokens']['Manager'] ==
+ swarm_info_2['JoinTokens']['Manager']
+ )
+ assert (
+ swarm_info_1['JoinTokens']['Worker'] !=
+ swarm_info_2['JoinTokens']['Worker']
+ )
+
+ @requires_api_version('1.24')
+ def test_list_nodes(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ assert len(nodes_list) == 1
+ node = nodes_list[0]
+ assert 'ID' in node
+ assert 'Spec' in node
+ assert node['Spec']['Role'] == 'manager'
+
+ filtered_list = self.client.nodes(filters={
+ 'id': node['ID']
+ })
+ assert len(filtered_list) == 1
+ filtered_list = self.client.nodes(filters={
+ 'role': 'worker'
+ })
+ assert len(filtered_list) == 0
+
+ @requires_api_version('1.24')
+ def test_inspect_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ assert len(nodes_list) == 1
+ node = nodes_list[0]
+ node_data = self.client.inspect_node(node['ID'])
+ assert node['ID'] == node_data['ID']
+ assert node['Version'] == node_data['Version']
+
+ @requires_api_version('1.24')
+ def test_update_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ node = nodes_list[0]
+ orig_spec = node['Spec']
+
+ # add a new label
+ new_spec = copy.deepcopy(orig_spec)
+ new_spec['Labels'] = {'new.label': 'new value'}
+ self.client.update_node(node_id=node['ID'],
+ version=node['Version']['Index'],
+ node_spec=new_spec)
+ updated_node = self.client.inspect_node(node['ID'])
+ assert new_spec == updated_node['Spec']
+
+ # Revert the changes
+ self.client.update_node(node_id=node['ID'],
+ version=updated_node['Version']['Index'],
+ node_spec=orig_spec)
+ reverted_node = self.client.inspect_node(node['ID'])
+ assert orig_spec == reverted_node['Spec']
+
+ @requires_api_version('1.24')
+ def test_remove_main_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ node_id = nodes_list[0]['ID']
+ with pytest.raises(docker.errors.NotFound):
+ self.client.remove_node('foobar01')
+ with pytest.raises(docker.errors.APIError) as e:
+ self.client.remove_node(node_id)
+
+ assert e.value.response.status_code >= 400
+
+ with pytest.raises(docker.errors.APIError) as e:
+ self.client.remove_node(node_id, True)
+
+ assert e.value.response.status_code >= 400
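These tests walk the whole single-node swarm life cycle. The core calls, condensed into one hedged sketch (advertise_addr depends on the host's interfaces; 'eth0' is only an example):

    import docker

    client = docker.APIClient(version='auto')

    client.init_swarm(advertise_addr='eth0')      # this node becomes a manager
    info = client.inspect_swarm()

    # Like services and nodes, the swarm spec itself is updated against a
    # version index to detect concurrent modification.
    spec = client.create_swarm_spec(snapshot_interval=5000)
    client.update_swarm(version=info['Version']['Index'], swarm_spec=spec,
                        rotate_worker_token=True)

    client.leave_swarm(force=True)                # a lone manager must use force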
diff --git a/tests/integration/api_test.py b/tests/integration/api_test.py
deleted file mode 100644
index 67ed068..0000000
--- a/tests/integration/api_test.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import base64
-import os
-import tempfile
-import time
-import unittest
-import warnings
-
-import docker
-
-from .. import helpers
-
-
-class InformationTest(helpers.BaseTestCase):
- def test_version(self):
- res = self.client.version()
- self.assertIn('GoVersion', res)
- self.assertIn('Version', res)
- self.assertEqual(len(res['Version'].split('.')), 3)
-
- def test_info(self):
- res = self.client.info()
- self.assertIn('Containers', res)
- self.assertIn('Images', res)
- self.assertIn('Debug', res)
-
- def test_search(self):
- self.client = helpers.docker_client(timeout=10)
- res = self.client.search('busybox')
- self.assertTrue(len(res) >= 1)
- base_img = [x for x in res if x['name'] == 'busybox']
- self.assertEqual(len(base_img), 1)
- self.assertIn('description', base_img[0])
-
-
-class LinkTest(helpers.BaseTestCase):
- def test_remove_link(self):
- # Create containers
- container1 = self.client.create_container(
- helpers.BUSYBOX, 'cat', detach=True, stdin_open=True
- )
- container1_id = container1['Id']
- self.tmp_containers.append(container1_id)
- self.client.start(container1_id)
-
- # Create Link
- # we don't want the first /
- link_path = self.client.inspect_container(container1_id)['Name'][1:]
- link_alias = 'mylink'
-
- container2 = self.client.create_container(
- helpers.BUSYBOX, 'cat', host_config=self.client.create_host_config(
- links={link_path: link_alias}
- )
- )
- container2_id = container2['Id']
- self.tmp_containers.append(container2_id)
- self.client.start(container2_id)
-
- # Remove link
- linked_name = self.client.inspect_container(container2_id)['Name'][1:]
- link_name = '%s/%s' % (linked_name, link_alias)
- self.client.remove_container(link_name, link=True)
-
- # Link is gone
- containers = self.client.containers(all=True)
- retrieved = [x for x in containers if link_name in x['Names']]
- self.assertEqual(len(retrieved), 0)
-
- # Containers are still there
- retrieved = [
- x for x in containers if x['Id'].startswith(container1_id) or
- x['Id'].startswith(container2_id)
- ]
- self.assertEqual(len(retrieved), 2)
-
-
-class LoadConfigTest(helpers.BaseTestCase):
- def test_load_legacy_config(self):
- folder = tempfile.mkdtemp()
- self.tmp_folders.append(folder)
- cfg_path = os.path.join(folder, '.dockercfg')
- f = open(cfg_path, 'w')
- auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
- f.write('auth = {0}\n'.format(auth_))
- f.write('email = sakuya@scarlet.net')
- f.close()
- cfg = docker.auth.load_config(cfg_path)
- self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
- cfg = cfg[docker.auth.INDEX_NAME]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('Auth'), None)
-
- def test_load_json_config(self):
- folder = tempfile.mkdtemp()
- self.tmp_folders.append(folder)
- cfg_path = os.path.join(folder, '.dockercfg')
- f = open(os.path.join(folder, '.dockercfg'), 'w')
- auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
- email_ = 'sakuya@scarlet.net'
- f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format(
- docker.auth.INDEX_URL, auth_, email_))
- f.close()
- cfg = docker.auth.load_config(cfg_path)
- self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
- cfg = cfg[docker.auth.INDEX_URL]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('Auth'), None)
-
-
-class AutoDetectVersionTest(unittest.TestCase):
- def test_client_init(self):
- client = helpers.docker_client(version='auto')
- client_version = client._version
- api_version = client.version(api_version=False)['ApiVersion']
- self.assertEqual(client_version, api_version)
- api_version_2 = client.version()['ApiVersion']
- self.assertEqual(client_version, api_version_2)
- client.close()
-
- def test_auto_client(self):
- client = docker.AutoVersionClient(**helpers.docker_client_kwargs())
- client_version = client._version
- api_version = client.version(api_version=False)['ApiVersion']
- self.assertEqual(client_version, api_version)
- api_version_2 = client.version()['ApiVersion']
- self.assertEqual(client_version, api_version_2)
- client.close()
- with self.assertRaises(docker.errors.DockerException):
- docker.AutoVersionClient(
- **helpers.docker_client_kwargs(version='1.11')
- )
-
-
-class ConnectionTimeoutTest(unittest.TestCase):
- def setUp(self):
- self.timeout = 0.5
- self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
- timeout=self.timeout)
-
- def test_timeout(self):
- start = time.time()
- res = None
- # This call isn't supposed to complete, and it should fail fast.
- try:
- res = self.client.inspect_container('id')
- except:
- pass
- end = time.time()
- self.assertTrue(res is None)
- self.assertTrue(end - start < 2 * self.timeout)
-
-
-class UnixconnTest(unittest.TestCase):
- """
- Test UNIX socket connection adapter.
- """
-
- def test_resource_warnings(self):
- """
- Test no warnings are produced when using the client.
- """
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter('always')
-
- client = helpers.docker_client()
- client.images()
- client.close()
- del client
-
- assert len(w) == 0, \
- "Expected no warnings, got: {0}".format(w[0].message)
diff --git a/tests/integration/volume_test.py b/tests/integration/api_volume_test.py
index 8fa2dab..8e7dd3a 100644
--- a/tests/integration/volume_test.py
+++ b/tests/integration/api_volume_test.py
@@ -1,20 +1,19 @@
import docker
import pytest
-from .. import helpers
-from ..base import requires_api_version
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
-@requires_api_version('1.21')
-class TestVolumes(helpers.BaseTestCase):
+class TestVolumes(BaseAPIIntegrationTest):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
result = self.client.create_volume(name)
- self.assertIn('Name', result)
- self.assertEqual(result['Name'], name)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
def test_create_volume_invalid_driver(self):
driver_name = 'invalid.driver'
@@ -27,16 +26,16 @@ class TestVolumes(helpers.BaseTestCase):
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.volumes()
- self.assertIn('Volumes', result)
+ assert 'Volumes' in result
volumes = result['Volumes']
- self.assertIn(volume_info, volumes)
+ assert volume_info in volumes
def test_inspect_volume(self):
name = 'embodimentofscarletdevil'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.inspect_volume(name)
- self.assertEqual(volume_info, result)
+ assert volume_info == result
def test_inspect_nonexistent_volume(self):
name = 'embodimentofscarletdevil'
@@ -49,6 +48,21 @@ class TestVolumes(helpers.BaseTestCase):
self.client.create_volume(name)
self.client.remove_volume(name)
+ @requires_api_version('1.25')
+ def test_force_remove_volume(self):
+ name = 'shootthebullet'
+ self.tmp_volumes.append(name)
+ self.client.create_volume(name)
+ self.client.remove_volume(name, force=True)
+
+ @requires_api_version('1.25')
+ def test_prune_volumes(self):
+ name = 'hopelessmasquerade'
+ self.client.create_volume(name)
+ self.tmp_volumes.append(name)
+ result = self.client.prune_volumes()
+ assert name in result['VolumesDeleted']
+
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
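
The two new tests map onto two APIClient calls, both gated on API version 1.25+. A minimal sketch against a local daemon (the volume name is hypothetical):

    import docker
    from docker.utils import kwargs_from_env

    client = docker.APIClient(version='auto', **kwargs_from_env())
    client.create_volume('scratch-volume')
    client.remove_volume('scratch-volume', force=True)

    # Prune removes all unused volumes and reports what was deleted.
    result = client.prune_volumes()
    print(result['VolumesDeleted'])
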
diff --git a/tests/integration/base.py b/tests/integration/base.py
new file mode 100644
index 0000000..56c23ed
--- /dev/null
+++ b/tests/integration/base.py
@@ -0,0 +1,125 @@
+import os
+import shutil
+import unittest
+
+import docker
+from docker.utils import kwargs_from_env
+
+from .. import helpers
+
+BUSYBOX = 'busybox:buildroot-2014.02'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
+
+
+class BaseIntegrationTest(unittest.TestCase):
+ """
+ A base class for integration test cases. It cleans up the Docker server
+ after itself.
+ """
+
+ def setUp(self):
+ self.tmp_imgs = []
+ self.tmp_containers = []
+ self.tmp_folders = []
+ self.tmp_volumes = []
+ self.tmp_networks = []
+ self.tmp_plugins = []
+ self.tmp_secrets = []
+ self.tmp_configs = []
+
+ def tearDown(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True, v=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+
+
+class BaseAPIIntegrationTest(BaseIntegrationTest):
+ """
+ A test case for `APIClient` integration tests. It sets up an `APIClient`
+ as `self.client`.
+ """
+
+ def setUp(self):
+ super(BaseAPIIntegrationTest, self).setUp()
+ self.client = self.get_client_instance()
+
+ def tearDown(self):
+ super(BaseAPIIntegrationTest, self).tearDown()
+ self.client.close()
+
+ @staticmethod
+ def get_client_instance():
+ return docker.APIClient(
+ version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
+ )
+
+ @staticmethod
+ def _init_swarm(client, **kwargs):
+ return client.init_swarm(
+ '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
+ )
+
+ def run_container(self, *args, **kwargs):
+ container = self.client.create_container(*args, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+
+ if exitcode != 0:
+ output = self.client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}"
+ .format(exitcode, output))
+
+ return container
+
+ def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
+ container = self.client.create_container(
+ image=image, command=command, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ return container
+
+ def execute(self, container, cmd, exit_code=0, **kwargs):
+ exc = self.client.exec_create(container, cmd, **kwargs)
+ output = self.client.exec_start(exc)
+ actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
+ msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
+ " ".join(cmd), exit_code, actual_exit_code, output)
+ assert actual_exit_code == exit_code, msg
+
+ def init_swarm(self, **kwargs):
+ return self._init_swarm(self.client, **kwargs)
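
A test module built on this base class only has to register the resources it creates; tearDown() force-removes them even when an assertion fails. A hypothetical example:

    from .base import BaseAPIIntegrationTest, BUSYBOX

    class ExampleTest(BaseAPIIntegrationTest):
        def test_echo(self):
            # run_container() appends to self.tmp_containers itself,
            # so tearDown() cleans up regardless of the outcome.
            container = self.run_container(BUSYBOX, ['echo', 'hi'])
            assert self.client.logs(container) == b'hi\n'
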
diff --git a/tests/integration/build_test.py b/tests/integration/build_test.py
deleted file mode 100644
index c7a5fbe..0000000
--- a/tests/integration/build_test.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import io
-import os
-import shutil
-import tempfile
-
-import six
-
-from docker import errors
-
-from .. import helpers
-from ..base import requires_api_version
-
-
-class BuildTest(helpers.BaseTestCase):
- def test_build_streaming(self):
- script = io.BytesIO('\n'.join([
- 'FROM busybox',
- 'MAINTAINER docker-py',
- 'RUN mkdir -p /tmp/test',
- 'EXPOSE 8080',
- 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
- ' /tmp/silence.tar.gz'
- ]).encode('ascii'))
- stream = self.client.build(fileobj=script, stream=True, decode=True)
- logs = []
- for chunk in stream:
- logs.append(chunk)
- assert len(logs) > 0
-
- def test_build_from_stringio(self):
- if six.PY3:
- return
- script = io.StringIO(six.text_type('\n').join([
- 'FROM busybox',
- 'MAINTAINER docker-py',
- 'RUN mkdir -p /tmp/test',
- 'EXPOSE 8080',
- 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
- ' /tmp/silence.tar.gz'
- ]))
- stream = self.client.build(fileobj=script, stream=True)
- logs = ''
- for chunk in stream:
- if six.PY3:
- chunk = chunk.decode('utf-8')
- logs += chunk
- self.assertNotEqual(logs, '')
-
- @requires_api_version('1.8')
- def test_build_with_dockerignore(self):
- base_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, base_dir)
-
- with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
- f.write("\n".join([
- 'FROM busybox',
- 'MAINTAINER docker-py',
- 'ADD . /test',
- ]))
-
- with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
- f.write("\n".join([
- 'ignored',
- 'Dockerfile',
- '.dockerignore',
- '!ignored/subdir/excepted-file',
- '', # empty line
- ]))
-
- with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
- f.write("this file should not be ignored")
-
- subdir = os.path.join(base_dir, 'ignored', 'subdir')
- os.makedirs(subdir)
- with open(os.path.join(subdir, 'file'), 'w') as f:
- f.write("this file should be ignored")
-
- with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
- f.write("this file should not be ignored")
-
- tag = 'docker-py-test-build-with-dockerignore'
- stream = self.client.build(
- path=base_dir,
- tag=tag,
- )
- for chunk in stream:
- pass
-
- c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
- self.client.start(c)
- self.client.wait(c)
- logs = self.client.logs(c)
-
- if six.PY3:
- logs = logs.decode('utf-8')
-
- self.assertEqual(
- sorted(list(filter(None, logs.split('\n')))),
- sorted(['/test/ignored/subdir/excepted-file',
- '/test/not-ignored']),
- )
-
- @requires_api_version('1.21')
- def test_build_with_buildargs(self):
- script = io.BytesIO('\n'.join([
- 'FROM scratch',
- 'ARG test',
- 'USER $test'
- ]).encode('ascii'))
-
- stream = self.client.build(
- fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
- )
- self.tmp_imgs.append('buildargs')
- for chunk in stream:
- pass
-
- info = self.client.inspect_image('buildargs')
- self.assertEqual(info['Config']['User'], 'OK')
-
- def test_build_stderr_data(self):
- control_chars = ['\x1b[91m', '\x1b[0m']
- snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
- script = io.BytesIO(b'\n'.join([
- b'FROM busybox',
- 'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
- ]))
-
- stream = self.client.build(
- fileobj=script, stream=True, decode=True, nocache=True
- )
- lines = []
- for chunk in stream:
- lines.append(chunk.get('stream'))
- expected = '{0}{2}\n{1}'.format(
- control_chars[0], control_chars[1], snippet
- )
- self.assertTrue(any([line == expected for line in lines]))
-
- def test_build_gzip_encoding(self):
- base_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, base_dir)
-
- with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
- f.write("\n".join([
- 'FROM busybox',
- 'MAINTAINER docker-py',
- 'ADD . /test',
- ]))
-
- stream = self.client.build(
- path=base_dir, stream=True, decode=True, nocache=True,
- gzip=True
- )
-
- lines = []
- for chunk in stream:
- lines.append(chunk)
-
- assert 'Successfully built' in lines[-1]['stream']
-
- def test_build_gzip_custom_encoding(self):
- with self.assertRaises(errors.DockerException):
- self.client.build(path='.', gzip=True, encoding='text/html')
diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py
new file mode 100644
index 0000000..7df172c
--- /dev/null
+++ b/tests/integration/client_test.py
@@ -0,0 +1,49 @@
+import threading
+import unittest
+
+import docker
+
+from datetime import datetime, timedelta
+
+from ..helpers import requires_api_version
+from .base import TEST_API_VERSION
+
+
+class ClientTest(unittest.TestCase):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ def test_info(self):
+ info = self.client.info()
+ assert 'ID' in info
+ assert 'Name' in info
+
+ def test_ping(self):
+ assert self.client.ping() is True
+
+ def test_version(self):
+ assert 'Version' in self.client.version()
+
+ @requires_api_version('1.25')
+ def test_df(self):
+ data = self.client.df()
+ assert 'LayersSize' in data
+ assert 'Containers' in data
+ assert 'Volumes' in data
+ assert 'Images' in data
+
+
+class CancellableEventsTest(unittest.TestCase):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ def test_cancel_events(self):
+ start = datetime.now()
+
+ events = self.client.events(until=start + timedelta(seconds=5))
+
+ cancel_thread = threading.Timer(2, events.close)
+ cancel_thread.start()
+
+ for _ in events:
+ pass
+
+ self.assertLess(datetime.now() - start, timedelta(seconds=3))
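
CancellableEventsTest depends on the generator returned by client.events() exposing close(), so a timer on another thread can unblock the consuming loop. A sketch, assuming a reachable daemon:

    import threading

    import docker

    client = docker.from_env()
    events = client.events(decode=True)        # blocking generator
    threading.Timer(2, events.close).start()   # stop listening after ~2s

    for event in events:                       # returns once close() is called
        print(event.get('Type'), event.get('Action'))
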
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index e65dd1d..4e8d268 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -4,16 +4,16 @@ import sys
import warnings
import docker.errors
+from docker.utils import kwargs_from_env
import pytest
-from ..helpers import BUSYBOX
-from ..helpers import docker_client
+from .base import BUSYBOX
@pytest.fixture(autouse=True, scope='session')
def setup_test_session():
warnings.simplefilter('error')
- c = docker_client()
+ c = docker.APIClient(version='auto', **kwargs_from_env())
try:
c.inspect_image(BUSYBOX)
except docker.errors.NotFound:
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
new file mode 100644
index 0000000..ac74d72
--- /dev/null
+++ b/tests/integration/errors_test.py
@@ -0,0 +1,15 @@
+from docker.errors import APIError
+from .base import BaseAPIIntegrationTest, BUSYBOX
+import pytest
+
+
+class ErrorsTest(BaseAPIIntegrationTest):
+ def test_api_error_parses_json(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '10'])
+ self.client.start(container['Id'])
+ with pytest.raises(APIError) as cm:
+ self.client.remove_container(container['Id'])
+ explanation = cm.value.explanation
+ assert 'You cannot remove a running container' in explanation
+ assert '{"message":' not in explanation
+ self.client.remove_container(container['Id'], force=True)
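
The behaviour pinned down here is that APIError.explanation carries the daemon's plain-text message rather than the raw JSON body, so callers can surface it directly. A sketch (the container id is hypothetical):

    import docker

    client = docker.APIClient(version='auto')
    try:
        client.remove_container('some-running-container')
    except docker.errors.APIError as e:
        print(e.response.status_code)  # e.g. 409 for a running container
        print(e.explanation)           # human-readable, not '{"message": ...}'
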
diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py
deleted file mode 100644
index f377e09..0000000
--- a/tests/integration/exec_test.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from docker.utils.socket import next_frame_size
-from docker.utils.socket import read_exactly
-
-from .. import helpers
-
-BUSYBOX = helpers.BUSYBOX
-
-
-class ExecTest(helpers.BaseTestCase):
- def test_execute_command(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
-
- res = self.client.exec_create(id, ['echo', 'hello'])
- self.assertIn('Id', res)
-
- exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'hello\n')
-
- def test_exec_command_string(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
-
- res = self.client.exec_create(id, 'echo hello world')
- self.assertIn('Id', res)
-
- exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'hello world\n')
-
- def test_exec_command_as_user(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
-
- res = self.client.exec_create(id, 'whoami', user='default')
- self.assertIn('Id', res)
-
- exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'default\n')
-
- def test_exec_command_as_root(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
-
- res = self.client.exec_create(id, 'whoami')
- self.assertIn('Id', res)
-
- exec_log = self.client.exec_start(res)
- self.assertEqual(exec_log, b'root\n')
-
- def test_exec_command_streaming(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- id = container['Id']
- self.tmp_containers.append(id)
- self.client.start(id)
-
- exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
- self.assertIn('Id', exec_id)
-
- res = b''
- for chunk in self.client.exec_start(exec_id, stream=True):
- res += chunk
- self.assertEqual(res, b'hello\nworld\n')
-
- def test_exec_start_socket(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- container_id = container['Id']
- self.client.start(container_id)
- self.tmp_containers.append(container_id)
-
- line = 'yay, interactive exec!'
- # `echo` appends CRLF, `printf` doesn't
- exec_id = self.client.exec_create(
- container_id, ['printf', line], tty=True)
- self.assertIn('Id', exec_id)
-
- socket = self.client.exec_start(exec_id, socket=True)
- self.addCleanup(socket.close)
-
- next_size = next_frame_size(socket)
- self.assertEqual(next_size, len(line))
- data = read_exactly(socket, next_size)
- self.assertEqual(data.decode('utf-8'), line)
-
- def test_exec_inspect(self):
- container = self.client.create_container(BUSYBOX, 'cat',
- detach=True, stdin_open=True)
- id = container['Id']
- self.client.start(id)
- self.tmp_containers.append(id)
-
- exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
- self.assertIn('Id', exec_id)
- self.client.exec_start(exec_id)
- exec_info = self.client.exec_inspect(exec_id)
- self.assertIn('ExitCode', exec_info)
- self.assertNotEqual(exec_info['ExitCode'], 0)
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
new file mode 100644
index 0000000..6ddb034
--- /dev/null
+++ b/tests/integration/models_containers_test.py
@@ -0,0 +1,361 @@
+import tempfile
+import threading
+
+import docker
+import pytest
+from .base import BaseIntegrationTest, TEST_API_VERSION
+from ..helpers import random_name, requires_api_version
+
+
+class ContainerCollectionTest(BaseIntegrationTest):
+
+ def test_run(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ assert client.containers.run(
+ "alpine", "echo hello world", remove=True
+ ) == b'hello world\n'
+
+ def test_run_detach(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['Config']['Image'] == "alpine"
+ assert container.attrs['Config']['Cmd'] == ['sleep', '300']
+
+ def test_run_with_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ContainerError) as cm:
+ client.containers.run("alpine", "cat /test", remove=True)
+ assert cm.value.exit_status == 1
+ assert "cat /test" in cm.exconly()
+ assert "alpine" in cm.exconly()
+ assert "No such file or directory" in cm.exconly()
+
+ def test_run_with_image_that_does_not_exist(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ImageNotFound):
+ client.containers.run("dockerpytest_does_not_exist")
+
+ def test_run_with_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ path = tempfile.mkdtemp()
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["%s:/insidecontainer" % path],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ name = "container_volume_test"
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["%s:/insidecontainer" % path],
+ name=name
+ )
+ self.tmp_containers.append(name)
+ assert out == b'hello\n'
+
+ def test_run_with_named_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create(name="somevolume")
+ self.tmp_volumes.append(volume.id)
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["somevolume:/insidecontainer"],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ name = "container_volume_test"
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["somevolume:/insidecontainer"],
+ name=name
+ )
+ self.tmp_containers.append(name)
+ assert out == b'hello\n'
+
+ def test_run_with_network(self):
+ net_name = random_name()
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.networks.create(net_name)
+ self.tmp_networks.append(net_name)
+
+ container = client.containers.run(
+ 'alpine', 'echo hello world', network=net_name,
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+
+ attrs = container.attrs
+
+ assert 'NetworkSettings' in attrs
+ assert 'Networks' in attrs['NetworkSettings']
+ assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
+
+ def test_run_with_none_driver(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ out = client.containers.run(
+ "alpine", "echo hello",
+ log_config=dict(type='none')
+ )
+ assert out is None
+
+ def test_run_with_json_file_driver(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ out = client.containers.run(
+ "alpine", "echo hello",
+ log_config=dict(type='json-file')
+ )
+ assert out == b'hello\n'
+
+ @requires_api_version('1.25')
+ def test_run_with_auto_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'echo hello', auto_remove=True
+ )
+ assert out == b'hello\n'
+
+ @requires_api_version('1.25')
+ def test_run_with_auto_remove_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ContainerError) as e:
+ client.containers.run(
+ 'alpine', 'sh -c ">&2 echo error && exit 1"', auto_remove=True
+ )
+ assert e.value.exit_status == 1
+ assert e.value.stderr is None
+
+ def test_run_with_streamed_logs(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'sh -c "echo hello && echo world"', stream=True
+ )
+ logs = [line for line in out]
+ assert logs[0] == b'hello\n'
+ assert logs[1] == b'world\n'
+
+ @pytest.mark.timeout(5)
+ def test_run_with_streamed_logs_and_cancel(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'sh -c "echo hello && echo world"', stream=True
+ )
+
+ threading.Timer(1, out.close).start()
+
+ logs = [line for line in out]
+
+ assert len(logs) == 2
+ assert logs[0] == b'hello\n'
+ assert logs[1] == b'world\n'
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert client.containers.get(container.id).attrs[
+ 'Config']['Image'] == "alpine"
+
+ def test_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list() if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Config']['Image'] == 'alpine'
+ assert container.status == 'running'
+ assert container.image == client.images.get('alpine')
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+ def test_list_sparse(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list(sparse=True) if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Image'] == 'alpine'
+ assert container.status == 'running'
+ assert container.image == client.images.get('alpine')
+ with pytest.raises(docker.errors.DockerException):
+ container.labels
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+
+class ContainerTest(BaseIntegrationTest):
+
+ def test_commit(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test'",
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+ image = container.commit()
+ assert client.containers.run(
+ image.id, "cat /test", remove=True
+ ) == b"hello\n"
+
+ def test_diff(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "touch /test", detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.diff() == [{'Path': '/test', 'Kind': 1}]
+
+ def test_exec_run_success(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ exec_output = container.exec_run("cat /test")
+ assert exec_output[0] == 0
+ assert exec_output[1] == b"hello\n"
+
+ def test_exec_run_failed(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ exec_output = container.exec_run("docker ps")
+ assert exec_output[0] == 126
+
+ def test_kill(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ while container.status != 'running':
+ container.reload()
+ assert container.status == 'running'
+ container.kill()
+ container.reload()
+ assert container.status == 'exited'
+
+ def test_logs(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello world",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.logs() == b"hello world\n"
+
+ def test_pause(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ container.pause()
+ container.reload()
+ assert container.status == "paused"
+ container.unpause()
+ container.reload()
+ assert container.status == "running"
+
+ def test_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.id in [c.id for c in client.containers.list(all=True)]
+ container.wait()
+ container.remove()
+ containers = client.containers.list(all=True)
+ assert container.id not in [c.id for c in containers]
+
+ def test_rename(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello", name="test1",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.name == "test1"
+ container.rename("test2")
+ container.reload()
+ assert container.name == "test2"
+
+ def test_restart(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.restart()
+ container.reload()
+ second_started_at = container.attrs['State']['StartedAt']
+ assert first_started_at != second_started_at
+
+ def test_start(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.create("alpine", "sleep 50", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status == "created"
+ container.start()
+ container.reload()
+ assert container.status == "running"
+
+ def test_stats(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ stats = container.stats(stream=False)
+ for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
+ 'memory_stats', 'blkio_stats']:
+ assert key in stats
+
+ def test_stop(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "top", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status in ("running", "created")
+ container.stop(timeout=2)
+ container.reload()
+ assert container.status == "exited"
+
+ def test_top(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 60", detach=True)
+ self.tmp_containers.append(container.id)
+ top = container.top()
+ assert len(top['Processes']) == 1
+ assert 'sleep 60' in top['Processes'][0]
+
+ def test_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 60", detach=True,
+ cpu_shares=2)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['HostConfig']['CpuShares'] == 2
+ container.update(cpu_shares=3)
+ container.reload()
+ assert container.attrs['HostConfig']['CpuShares'] == 3
+
+ def test_wait(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sh -c 'exit 0'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait()['StatusCode'] == 0
+ container = client.containers.run("alpine", "sh -c 'exit 1'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait()['StatusCode'] == 1
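
Taken together, these tests document the high-level container workflow: run() returns the command's output for foreground containers, and a Container object with detach=True whose attrs are a cached snapshot until reload(). A condensed sketch:

    import docker

    client = docker.from_env()

    # Foreground: stdout comes back as bytes.
    out = client.containers.run('alpine', 'echo hello world', remove=True)
    assert out == b'hello world\n'

    # Detached: returns a model object to operate on.
    container = client.containers.run('alpine', 'sleep 300', detach=True)
    container.reload()          # refresh attrs from the daemon
    print(container.status)     # 'running'
    container.kill()
    container.remove()
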
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
new file mode 100644
index 0000000..ae735ba
--- /dev/null
+++ b/tests/integration/models_images_test.py
@@ -0,0 +1,136 @@
+import io
+import tempfile
+
+import docker
+import pytest
+
+from .base import BaseIntegrationTest, BUSYBOX, TEST_API_VERSION
+
+
+class ImageCollectionTest(BaseIntegrationTest):
+
+ def test_build(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image, _ = client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ ))
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ # @pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
+ def test_build_with_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.BuildError) as cm:
+ client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "RUN exit 1".encode('ascii')
+ ))
+ assert (
+ "The command '/bin/sh -c exit 1' returned a non-zero code: 1"
+ ) in cm.exconly()
+ assert cm.value.build_log
+
+ def test_build_with_multiple_success(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image, _ = client.images.build(
+ tag='some-tag', fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ )
+ )
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ def test_build_with_success_build_output(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image, _ = client.images.build(
+ tag='dup-txt-tag', fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo Successfully built abcd1234".encode('ascii')
+ )
+ )
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"Successfully built abcd1234\n"
+
+ def test_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list())
+
+ def test_list_with_repository(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list('alpine'))
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+ def test_pull(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert 'alpine:latest' in image.attrs['RepoTags']
+
+ def test_pull_with_tag(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine', tag='3.3')
+ assert 'alpine:3.3' in image.attrs['RepoTags']
+
+ def test_pull_with_sha(self):
+ image_ref = (
+ 'hello-world@sha256:083de497cff944f969d8499ab94f07134c50bcf5e6b95'
+ '59b27182d3fa80ce3f7'
+ )
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull(image_ref)
+ assert image_ref in image.attrs['RepoDigests']
+
+ def test_pull_multiple(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ images = client.images.pull('hello-world')
+ assert len(images) == 1
+ assert 'hello-world:latest' in images[0].attrs['RepoTags']
+
+ def test_load_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ImageLoadError):
+ client.images.load('abc')
+
+ def test_save_and_load(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(BUSYBOX)
+ with tempfile.TemporaryFile() as f:
+ stream = image.save()
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ result = client.images.load(f.read())
+
+ assert len(result) == 1
+ assert result[0].id == image.id
+
+
+class ImageTest(BaseIntegrationTest):
+
+ def test_tag_and_remove(self):
+ repo = 'dockersdk.tests.images.test_tag'
+ tag = 'some-tag'
+ identifier = '{}:{}'.format(repo, tag)
+
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+
+ result = image.tag(repo, tag)
+ assert result is True
+ self.tmp_imgs.append(identifier)
+ assert image.id in get_ids(client.images.list(repo))
+ assert image.id in get_ids(client.images.list(identifier))
+
+ client.images.remove(identifier)
+ assert image.id not in get_ids(client.images.list(repo))
+ assert image.id not in get_ids(client.images.list(identifier))
+
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+
+def get_ids(images):
+ return [i.id for i in images]
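
Note that images.build() in this SDK version returns a tuple, which is why the tests unpack it; the second element is the build log. A sketch (the tag is hypothetical, and the log entries are assumed to be decoded JSON chunks, as the BuildError.build_log assertion above suggests):

    import io

    import docker

    client = docker.from_env()
    dockerfile = io.BytesIO(b'FROM alpine\nCMD echo hello world\n')
    image, build_log = client.images.build(
        fileobj=dockerfile, tag='example:latest'
    )
    for entry in build_log:
        print(entry.get('stream', ''), end='')
    print(image.id)
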
diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py
new file mode 100644
index 0000000..08d7ad2
--- /dev/null
+++ b/tests/integration/models_networks_test.py
@@ -0,0 +1,70 @@
+import docker
+from .. import helpers
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class NetworkCollectionTest(BaseIntegrationTest):
+
+ def test_create(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(name, labels={'foo': 'bar'})
+ self.tmp_networks.append(network.id)
+ assert network.name == name
+ assert network.attrs['Labels']['foo'] == "bar"
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network_id = client.networks.create(name).id
+ self.tmp_networks.append(network_id)
+ network = client.networks.get(network_id)
+ assert network.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(name)
+ self.tmp_networks.append(network.id)
+ assert network.id in [n.id for n in client.networks.list()]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(ids=["fdhjklfdfdshjkfds"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(ids=[network.id])
+ ]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(names=["fdshjklfdsjhkl"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(names=[name])
+ ]
+ network.remove()
+ assert network.id not in [n.id for n in client.networks.list()]
+
+
+class NetworkTest(BaseIntegrationTest):
+
+ def test_connect_disconnect(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ network = client.networks.create(helpers.random_name())
+ self.tmp_networks.append(network.id)
+ container = client.containers.create("alpine", "sleep 300")
+ self.tmp_containers.append(container.id)
+ assert network.containers == []
+ network.connect(container)
+ container.start()
+ assert client.networks.get(network.id).containers == [container]
+ network_containers = list(
+ c
+ for net in client.networks.list(ids=[network.id], greedy=True)
+ for c in net.containers
+ )
+ assert network_containers == [container]
+ network.disconnect(container)
+ assert network.containers == []
+ assert client.networks.get(network.id).containers == []
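
The connect/disconnect test also shows that membership is observed by re-fetching the network; the model object itself is a snapshot. A sketch (names hypothetical):

    import docker

    client = docker.from_env()
    network = client.networks.create('example-net')
    container = client.containers.create('alpine', 'sleep 300')

    network.connect(container)
    container.start()
    print(client.networks.get(network.id).containers)  # [container]

    network.disconnect(container)
    container.kill()
    container.remove()
    network.remove()
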
diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py
new file mode 100644
index 0000000..3c8d48a
--- /dev/null
+++ b/tests/integration/models_nodes_test.py
@@ -0,0 +1,37 @@
+import unittest
+
+import docker
+
+from .. import helpers
+from .base import TEST_API_VERSION
+
+
+class NodesTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_list_get_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
+ nodes = client.nodes.list()
+ assert len(nodes) == 1
+ assert nodes[0].attrs['Spec']['Role'] == 'manager'
+
+ node = client.nodes.get(nodes[0].id)
+ assert node.id == nodes[0].id
+ assert node.attrs['Spec']['Role'] == 'manager'
+ assert node.version > 0
+
+ node = client.nodes.list()[0]
+ assert not node.attrs['Spec'].get('Labels')
+ node.update({
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ })
+ node.reload()
+ assert node.attrs['Spec']['Labels'] == {'foo': 'bar'}
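
node.update() submits a complete spec, which is why the test passes 'Availability', 'Name' and 'Role' alongside the label it actually changes; omitting them would drop those fields. A sketch for a single-node swarm (label values hypothetical):

    import docker

    client = docker.from_env()
    node = client.nodes.list()[0]
    spec = node.attrs['Spec']           # start from the current spec
    spec['Labels'] = {'disk': 'ssd'}
    node.update(spec)
    node.reload()
    assert node.attrs['Spec']['Labels'] == {'disk': 'ssd'}
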
diff --git a/tests/integration/models_resources_test.py b/tests/integration/models_resources_test.py
new file mode 100644
index 0000000..4aafe0c
--- /dev/null
+++ b/tests/integration/models_resources_test.py
@@ -0,0 +1,16 @@
+import docker
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class ModelTest(BaseIntegrationTest):
+
+ def test_reload(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.kill()
+ container.start()
+ assert container.attrs['State']['StartedAt'] == first_started_at
+ container.reload()
+ assert container.attrs['State']['StartedAt'] != first_started_at
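
This is the reload() contract in one test: attrs is a snapshot taken when the model was fetched, and only reload() re-inspects the object. A sketch:

    import docker

    client = docker.from_env()
    container = client.containers.run('alpine', 'sleep 300', detach=True)

    before = container.attrs['State']['StartedAt']
    container.restart()
    assert container.attrs['State']['StartedAt'] == before  # stale snapshot
    container.reload()                                      # re-inspect
    assert container.attrs['State']['StartedAt'] != before
    container.kill()
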
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
new file mode 100644
index 0000000..36caa85
--- /dev/null
+++ b/tests/integration/models_services_test.py
@@ -0,0 +1,335 @@
+import unittest
+
+import docker
+import pytest
+
+from .. import helpers
+from .base import TEST_API_VERSION
+from docker.errors import InvalidArgument
+from docker.types.services import ServiceMode
+
+
+class ServiceTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ client = docker.from_env(version=TEST_API_VERSION)
+ helpers.force_leave_swarm(client)
+ client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
+
+ @classmethod
+ def tearDownClass(cls):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_create(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ # create arguments
+ name=name,
+ labels={'foo': 'bar'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'}
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+
+ def test_create_with_network(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(
+ helpers.random_name(), driver='overlay'
+ )
+ service = client.services.create(
+ # create arguments
+ name=name,
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ networks=[network.id]
+ )
+ assert 'Networks' in service.attrs['Spec']['TaskTemplate']
+ networks = service.attrs['Spec']['TaskTemplate']['Networks']
+ assert len(networks) == 1
+ assert networks[0]['Target'] == network.id
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ image="alpine",
+ command="sleep 300"
+ )
+ service = client.services.get(service.id)
+ assert service.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ assert service in client.services.list()
+ service.remove()
+ assert service not in client.services.list()
+
+ def test_tasks(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service1 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ service2 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service1.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service1.id
+
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service2.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service2.id
+
+ def test_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Command'] == ["sleep", "600"]
+
+ def test_update_retains_service_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ labels={'service.label': 'SampleLabel'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ labels = service.attrs['Spec']['Labels']
+ assert labels == {'service.label': 'SampleLabel'}
+
+ def test_update_retains_container_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container.label': 'SampleLabel'}
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Labels'] == {'container.label': 'SampleLabel'}
+
+ def test_update_remove_service_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ labels={'service.label': 'SampleLabel'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ labels={},
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert not service.attrs['Spec'].get('Labels')
+
+ @pytest.mark.xfail(reason='Flaky test')
+ def test_update_retains_networks(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ network_name = helpers.random_name()
+ network = client.networks.create(
+ network_name, driver='overlay'
+ )
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ networks=[network.id],
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.reload()
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ networks = service.attrs['Spec']['TaskTemplate']['Networks']
+ assert networks == [{'Target': network.id}]
+
+ def test_scale_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ service.update(
+ mode=docker.types.ServiceMode('replicated', replicas=2),
+ )
+ while len(tasks) == 1:
+ tasks = service.tasks()
+ assert len(tasks) >= 2
+ # check that the container spec is not overridden with None
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ def test_scale_method_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ service.scale(2)
+ while len(tasks) == 1:
+ tasks = service.tasks()
+ assert len(tasks) >= 2
+ # check that the container spec is not overridden with None
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ def test_scale_method_global_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ mode = ServiceMode('global')
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300",
+ mode=mode
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ with pytest.raises(InvalidArgument):
+ service.scale(2)
+
+ assert len(tasks) == 1
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.update(
+ # create argument
+ name=service.name,
+ # task template argument
+ force_update=10,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.version > initial_version
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service_using_bool(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.update(
+ # create argument
+ name=service.name,
+ # task template argument
+ force_update=True,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.version > initial_version
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service_using_shorthand_method(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.force_update()
+ service.reload()
+ assert service.version > initial_version
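
The tests poll tasks() in loops because task scheduling is asynchronous; the API calls themselves return as soon as the spec is accepted. A condensed sketch of the create/scale/force-update cycle, assuming an initialized swarm (service name hypothetical):

    import docker

    client = docker.from_env()
    service = client.services.create(
        name='example-svc',
        image='alpine',
        command='sleep 300',
    )
    service.scale(2)          # replicated mode only; global mode raises
    service.force_update()    # reschedule tasks without changing the spec
    service.reload()
    print(service.version)    # bumped by every accepted update
    service.remove()
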
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
new file mode 100644
index 0000000..f39f0d3
--- /dev/null
+++ b/tests/integration/models_swarm_test.py
@@ -0,0 +1,33 @@
+import unittest
+
+import docker
+
+from .. import helpers
+from .base import TEST_API_VERSION
+import pytest
+
+
+class SwarmTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_init_update_leave(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init(
+ advertise_addr='127.0.0.1', snapshot_interval=5000,
+ listen_addr=helpers.swarm_listen_addr()
+ )
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
+ client.swarm.update(snapshot_interval=10000)
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
+ assert client.swarm.id
+ assert client.swarm.leave(force=True)
+ with pytest.raises(docker.errors.APIError) as cm:
+ client.swarm.reload()
+ assert (
+ cm.value.response.status_code == 406 or
+ cm.value.response.status_code == 503
+ )
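
Swarm spec fields such as snapshot_interval pass straight through init() and update() as keyword arguments, which is what this test verifies. A sketch:

    import docker

    client = docker.from_env()
    client.swarm.init(advertise_addr='127.0.0.1', snapshot_interval=5000)
    client.swarm.update(snapshot_interval=10000)
    print(client.swarm.attrs['Spec']['Raft']['SnapshotInterval'])  # 10000
    client.swarm.leave(force=True)
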
diff --git a/tests/integration/models_volumes_test.py b/tests/integration/models_volumes_test.py
new file mode 100644
index 0000000..47b4a45
--- /dev/null
+++ b/tests/integration/models_volumes_test.py
@@ -0,0 +1,30 @@
+import docker
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class VolumesTest(BaseIntegrationTest):
+ def test_create_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create(
+ 'dockerpytest_1',
+ driver='local',
+ labels={'labelkey': 'labelvalue'}
+ )
+ self.tmp_volumes.append(volume.id)
+ assert volume.id
+ assert volume.name == 'dockerpytest_1'
+ assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'}
+
+ volume = client.volumes.get(volume.id)
+ assert volume.name == 'dockerpytest_1'
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create('dockerpytest_1')
+ self.tmp_volumes.append(volume.id)
+ assert volume in client.volumes.list()
+ assert volume in client.volumes.list(filters={'name': 'dockerpytest_'})
+ assert volume not in client.volumes.list(filters={'name': 'foobar'})
+
+ volume.remove()
+ assert volume not in client.volumes.list()
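
The list() assertions rely on the name filter doing partial matching ('dockerpytest_' matches 'dockerpytest_1'). A sketch (volume name hypothetical):

    import docker

    client = docker.from_env()
    volume = client.volumes.create('example-vol', labels={'purpose': 'demo'})
    assert volume in client.volumes.list(filters={'name': 'example-'})
    volume.remove()
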
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index 8b321cf..0fd4e43 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -4,18 +4,17 @@ import random
import docker
import six
-from .. import helpers
+from .base import BaseAPIIntegrationTest, BUSYBOX
+import pytest
-BUSYBOX = helpers.BUSYBOX
-
-class TestRegressions(helpers.BaseTestCase):
+class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
- with self.assertRaises(docker.errors.APIError) as exc:
+ with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
- self.assertEqual(exc.exception.response.status_code, 500)
+ assert exc.value.response.status_code == 500
dfile.close()
def test_542_truncate_ids_client_side(self):
@@ -23,10 +22,10 @@ class TestRegressions(helpers.BaseTestCase):
self.client.create_container(BUSYBOX, ['true'])
)
result = self.client.containers(all=True, trunc=True)
- self.assertEqual(len(result[0]['Id']), 12)
+ assert len(result[0]['Id']) == 12
def test_647_support_doubleslash_in_image_names(self):
- with self.assertRaises(docker.errors.APIError):
+ with pytest.raises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649_handle_timeout_value_none(self):
@@ -55,15 +54,12 @@ class TestRegressions(helpers.BaseTestCase):
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
- self.assertEqual(
- self.client.port(ctnr, 2000)[0]['HostPort'],
- six.text_type(tcp_port)
- )
- self.assertEqual(
- self.client.port(ctnr, '2000/tcp')[0]['HostPort'],
- six.text_type(tcp_port)
- )
- self.assertEqual(
- self.client.port(ctnr, '2000/udp')[0]['HostPort'],
- six.text_type(udp_port)
- )
+ assert self.client.port(
+ ctnr, 2000
+ )[0]['HostPort'] == six.text_type(tcp_port)
+ assert self.client.port(
+ ctnr, '2000/tcp'
+ )[0]['HostPort'] == six.text_type(tcp_port)
+ assert self.client.port(
+ ctnr, '2000/udp'
+ )[0]['HostPort'] == six.text_type(udp_port)
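
The rewritten port assertions document two details of APIClient.port(): the lookup key may be a bare port or a 'port/proto' string, and 'HostPort' comes back as a string. A sketch (assumes client is an APIClient and ctnr was created with 2000/tcp published to host port 12345, both hypothetical):

    mapping = client.port(ctnr, '2000/tcp')    # explicit protocol
    assert mapping[0]['HostPort'] == '12345'
    mapping = client.port(ctnr, 2000)          # bare port also accepted
    assert mapping[0]['HostPort'] == '12345'
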
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
deleted file mode 100644
index 2b99316..0000000
--- a/tests/integration/service_test.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import random
-
-import docker
-
-from ..base import requires_api_version
-from .. import helpers
-
-
-BUSYBOX = helpers.BUSYBOX
-
-
-class ServiceTest(helpers.BaseTestCase):
- def setUp(self):
- super(ServiceTest, self).setUp()
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
- self.client.init_swarm('eth0')
-
- def tearDown(self):
- super(ServiceTest, self).tearDown()
- for service in self.client.services(filters={'name': 'dockerpytest_'}):
- try:
- self.client.remove_service(service['ID'])
- except docker.errors.APIError:
- pass
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
-
- def get_service_name(self):
- return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
-
- def create_simple_service(self, name=None):
- if name:
- name = 'dockerpytest_{0}'.format(name)
- else:
- name = self.get_service_name()
-
- container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
- )
- task_tmpl = docker.types.TaskTemplate(container_spec)
- return name, self.client.create_service(task_tmpl, name=name)
-
- @requires_api_version('1.24')
- def test_list_services(self):
- services = self.client.services()
- assert isinstance(services, list)
-
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 0
- self.create_simple_service()
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 1
- assert 'dockerpytest_' in test_services[0]['Spec']['Name']
-
- def test_inspect_service_by_id(self):
- svc_name, svc_id = self.create_simple_service()
- svc_info = self.client.inspect_service(svc_id)
- assert 'ID' in svc_info
- assert svc_info['ID'] == svc_id['ID']
-
- def test_inspect_service_by_name(self):
- svc_name, svc_id = self.create_simple_service()
- svc_info = self.client.inspect_service(svc_name)
- assert 'ID' in svc_info
- assert svc_info['ID'] == svc_id['ID']
-
- def test_remove_service_by_id(self):
- svc_name, svc_id = self.create_simple_service()
- assert self.client.remove_service(svc_id)
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 0
-
- def test_remove_service_by_name(self):
- svc_name, svc_id = self.create_simple_service()
- assert self.client.remove_service(svc_name)
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 0
-
- def test_create_service_simple(self):
- name, svc_id = self.create_simple_service()
- assert self.client.inspect_service(svc_id)
- services = self.client.services(filters={'name': name})
- assert len(services) == 1
- assert services[0]['ID'] == svc_id['ID']
-
- def test_create_service_custom_log_driver(self):
- container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
- )
- log_cfg = docker.types.DriverConfig('none')
- task_tmpl = docker.types.TaskTemplate(
- container_spec, log_driver=log_cfg
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'TaskTemplate' in svc_info['Spec']
- res_template = svc_info['Spec']['TaskTemplate']
- assert 'LogDriver' in res_template
- assert 'Name' in res_template['LogDriver']
- assert res_template['LogDriver']['Name'] == 'none'
-
- def test_create_service_with_volume_mount(self):
- vol_name = self.get_service_name()
- container_spec = docker.types.ContainerSpec(
- 'busybox', ['ls'],
- mounts=[
- docker.types.Mount(target='/test', source=vol_name)
- ]
- )
- self.tmp_volumes.append(vol_name)
- task_tmpl = docker.types.TaskTemplate(container_spec)
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
- cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
- assert 'Mounts' in cspec
- assert len(cspec['Mounts']) == 1
- mount = cspec['Mounts'][0]
- assert mount['Target'] == '/test'
- assert mount['Source'] == vol_name
- assert mount['Type'] == 'volume'
-
- def test_create_service_with_resources_constraints(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
- resources = docker.types.Resources(
- cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
- cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
- )
- task_tmpl = docker.types.TaskTemplate(
- container_spec, resources=resources
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'TaskTemplate' in svc_info['Spec']
- res_template = svc_info['Spec']['TaskTemplate']
- assert 'Resources' in res_template
- assert res_template['Resources']['Limits'] == resources['Limits']
- assert res_template['Resources']['Reservations'] == resources[
- 'Reservations'
- ]
-
- def test_create_service_with_update_config(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
- task_tmpl = docker.types.TaskTemplate(container_spec)
- update_config = docker.types.UpdateConfig(
- parallelism=10, delay=5, failure_action='pause'
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(
- task_tmpl, update_config=update_config, name=name
- )
- svc_info = self.client.inspect_service(svc_id)
- assert 'UpdateConfig' in svc_info['Spec']
- assert update_config == svc_info['Spec']['UpdateConfig']
-
- def test_create_service_with_restart_policy(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
- policy = docker.types.RestartPolicy(
- docker.types.RestartPolicy.condition_types.ANY,
- delay=5, max_attempts=5
- )
- task_tmpl = docker.types.TaskTemplate(
- container_spec, restart_policy=policy
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
- assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
-
- def test_update_service_name(self):
- name, svc_id = self.create_simple_service()
- svc_info = self.client.inspect_service(svc_id)
- svc_version = svc_info['Version']['Index']
- new_name = self.get_service_name()
- assert self.client.update_service(
- svc_id, svc_version, name=new_name,
- task_template=svc_info['Spec']['TaskTemplate']
- )
- svc_info = self.client.inspect_service(svc_id)
- assert svc_info['Spec']['Name'] == new_name
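
The service API calls exercised by the deleted block above are unchanged in this release; the deletion is part of the same reorganization that renames the unit-test modules further below, and only the client class changes. A minimal sketch of the same create/inspect/remove cycle, assuming a daemon already running in swarm mode and an illustrative service name:

import docker

client = docker.APIClient()  # low-level client, successor to docker.Client

# Same spec-building pattern the deleted tests used.
container_spec = docker.types.ContainerSpec('busybox', ['echo', 'hello'])
task_tmpl = docker.types.TaskTemplate(container_spec)
svc_id = client.create_service(task_tmpl, name='dockerpytest_example')

# inspect_service and remove_service both accept the dict that
# create_service returns.
assert client.inspect_service(svc_id)['Spec']['Name'] == 'dockerpytest_example'
client.remove_service(svc_id)
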
diff --git a/tests/integration/swarm_test.py b/tests/integration/swarm_test.py
deleted file mode 100644
index 128628e..0000000
--- a/tests/integration/swarm_test.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import docker
-import pytest
-
-from ..base import requires_api_version
-from .. import helpers
-
-
-BUSYBOX = helpers.BUSYBOX
-
-
-class SwarmTest(helpers.BaseTestCase):
- def setUp(self):
- super(SwarmTest, self).setUp()
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
-
- def tearDown(self):
- super(SwarmTest, self).tearDown()
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
-
- @requires_api_version('1.24')
- def test_init_swarm_simple(self):
- assert self.client.init_swarm('eth0')
-
- @requires_api_version('1.24')
- def test_init_swarm_force_new_cluster(self):
- pytest.skip('Test stalls the engine on 1.12.0')
-
- assert self.client.init_swarm('eth0')
- version_1 = self.client.inspect_swarm()['Version']['Index']
- assert self.client.init_swarm('eth0', force_new_cluster=True)
- version_2 = self.client.inspect_swarm()['Version']['Index']
- assert version_2 != version_1
-
- @requires_api_version('1.24')
- def test_init_already_in_cluster(self):
- assert self.client.init_swarm('eth0')
- with pytest.raises(docker.errors.APIError):
- self.client.init_swarm('eth0')
-
- @requires_api_version('1.24')
- def test_init_swarm_custom_raft_spec(self):
- spec = self.client.create_swarm_spec(
- snapshot_interval=5000, log_entries_for_slow_followers=1200
- )
- assert self.client.init_swarm(
- advertise_addr='eth0', swarm_spec=spec
- )
- swarm_info = self.client.inspect_swarm()
- assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
- assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
-
- @requires_api_version('1.24')
- def test_leave_swarm(self):
- assert self.client.init_swarm('eth0')
- with pytest.raises(docker.errors.APIError) as exc_info:
- self.client.leave_swarm()
- assert exc_info.value.response.status_code == 500
- assert self.client.leave_swarm(force=True)
- with pytest.raises(docker.errors.APIError) as exc_info:
- self.client.inspect_swarm()
- assert exc_info.value.response.status_code == 406
-
- @requires_api_version('1.24')
- def test_update_swarm(self):
- assert self.client.init_swarm('eth0')
- swarm_info_1 = self.client.inspect_swarm()
- spec = self.client.create_swarm_spec(
- snapshot_interval=5000, log_entries_for_slow_followers=1200,
- node_cert_expiry=7776000000000000
- )
- assert self.client.update_swarm(
- version=swarm_info_1['Version']['Index'],
- swarm_spec=spec, rotate_worker_token=True
- )
- swarm_info_2 = self.client.inspect_swarm()
-
- assert (
- swarm_info_1['Version']['Index'] !=
- swarm_info_2['Version']['Index']
- )
- assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
- assert (
- swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
- )
- assert (
- swarm_info_1['JoinTokens']['Manager'] ==
- swarm_info_2['JoinTokens']['Manager']
- )
- assert (
- swarm_info_1['JoinTokens']['Worker'] !=
- swarm_info_2['JoinTokens']['Worker']
- )
-
- @requires_api_version('1.24')
- def test_update_swarm_name(self):
- assert self.client.init_swarm('eth0')
- swarm_info_1 = self.client.inspect_swarm()
- spec = self.client.create_swarm_spec(
- node_cert_expiry=7776000000000000, name='reimuhakurei'
- )
- assert self.client.update_swarm(
- version=swarm_info_1['Version']['Index'], swarm_spec=spec
- )
- swarm_info_2 = self.client.inspect_swarm()
-
- assert (
- swarm_info_1['Version']['Index'] !=
- swarm_info_2['Version']['Index']
- )
- assert swarm_info_2['Spec']['Name'] == 'reimuhakurei'
-
- @requires_api_version('1.24')
- def test_list_nodes(self):
- assert self.client.init_swarm('eth0')
- nodes_list = self.client.nodes()
- assert len(nodes_list) == 1
- node = nodes_list[0]
- assert 'ID' in node
- assert 'Spec' in node
- assert node['Spec']['Role'] == 'manager'
-
- filtered_list = self.client.nodes(filters={
- 'id': node['ID']
- })
- assert len(filtered_list) == 1
- filtered_list = self.client.nodes(filters={
- 'role': 'worker'
- })
- assert len(filtered_list) == 0
-
- @requires_api_version('1.24')
- def test_inspect_node(self):
- assert self.client.init_swarm('eth0')
- nodes_list = self.client.nodes()
- assert len(nodes_list) == 1
- node = nodes_list[0]
- node_data = self.client.inspect_node(node['ID'])
- assert node['ID'] == node_data['ID']
- assert node['Version'] == node_data['Version']
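
The swarm lifecycle these deleted tests covered maps onto a handful of APIClient calls. A rough sketch of the init/update/leave flow, assuming the host has an eth0 interface exactly as the tests did:

import docker

client = docker.APIClient()

client.init_swarm('eth0')

# update_swarm needs the current version index from inspect_swarm.
version = client.inspect_swarm()['Version']['Index']
spec = client.create_swarm_spec(snapshot_interval=5000,
                                log_entries_for_slow_followers=1200)
client.update_swarm(version=version, swarm_spec=spec,
                    rotate_worker_token=True)

client.leave_swarm(force=True)
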
diff --git a/tests/integration/testdata/dummy-plugin/config.json b/tests/integration/testdata/dummy-plugin/config.json
new file mode 100644
index 0000000..53b4e7a
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/config.json
@@ -0,0 +1,19 @@
+{
+ "description": "Dummy test plugin for docker python SDK",
+ "documentation": "https://github.com/docker/docker-py",
+ "entrypoint": ["/dummy"],
+ "network": {
+ "type": "host"
+ },
+ "interface" : {
+ "types": ["docker.volumedriver/1.0"],
+ "socket": "dummy.sock"
+ },
+ "env": [
+ {
+ "name":"DEBUG",
+ "settable":["value"],
+ "value":"0"
+ }
+ ]
+}
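
This config.json and the rootfs/ directory added below together form the on-disk layout the engine expects when creating a plugin from a local directory. A hedged sketch of driving it through the plugin API (the plugin name here is made up):

import docker

client = docker.APIClient()

# create_plugin consumes a directory containing config.json plus rootfs/.
client.create_plugin('example/dummy-plugin:latest',
                     'tests/integration/testdata/dummy-plugin')

# DEBUG is declared settable in config.json, so it may be reconfigured
# before the plugin is enabled.
client.configure_plugin('example/dummy-plugin:latest', {'DEBUG': '1'})
client.enable_plugin('example/dummy-plugin:latest')
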
diff --git a/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
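
From here on, the renamed unit-test modules also migrate from unittest-style helpers to plain pytest idioms: self.assertEqual becomes a bare assert, and self.assertRaises and deprecated_call wrappers become pytest.raises context managers. A toy, self-contained example of the target style (FakeError and might_fail are illustrative names, not SDK code):

import pytest


class FakeError(Exception):
    pass


def might_fail(value):
    if value < 0:
        raise FakeError('negative input')
    return value


def test_pytest_style():
    # Bare assert replaces self.assertEqual(...).
    assert might_fail(3) == 3
    # The context-manager form replaces self.assertRaises(...).
    with pytest.raises(FakeError):
        might_fail(-1)
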
diff --git a/tests/unit/build_test.py b/tests/unit/api_build_test.py
index b2705eb..a7f34fd 100644
--- a/tests/unit/build_test.py
+++ b/tests/unit/api_build_test.py
@@ -4,14 +4,14 @@ import io
import docker
from docker import auth
-from .api_test import DockerClientTest, fake_request, url_prefix
+from .api_test import BaseAPIClientTest, fake_request, url_prefix
+import pytest
-class BuildTest(DockerClientTest):
+class BuildTest(BaseAPIClientTest):
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -23,7 +23,6 @@ class BuildTest(DockerClientTest):
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -32,22 +31,9 @@ class BuildTest(DockerClientTest):
self.client.build(fileobj=script, pull=True)
- def test_build_container_stream(self):
- script = io.BytesIO('\n'.join([
- 'FROM busybox',
- 'MAINTAINER docker-py',
- 'RUN mkdir -p /tmp/test',
- 'EXPOSE 8080',
- 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
- ' /tmp/silence.tar.gz'
- ]).encode('ascii'))
-
- self.client.build(fileobj=script, stream=True)
-
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -60,7 +46,6 @@ class BuildTest(DockerClientTest):
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -77,10 +62,12 @@ class BuildTest(DockerClientTest):
def test_build_remote_with_registry_auth(self):
self.client._auth_configs = {
- 'https://example.com': {
- 'user': 'example',
- 'password': 'example',
- 'email': 'example@example.com'
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
}
}
@@ -89,7 +76,10 @@ class BuildTest(DockerClientTest):
'forcerm': False,
'remote': 'https://github.com/docker-library/mongo'}
expected_headers = {
- 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ )
+ }
self.client.build(path='https://github.com/docker-library/mongo')
@@ -115,44 +105,53 @@ class BuildTest(DockerClientTest):
})
def test_build_container_invalid_container_limits(self):
- self.assertRaises(
- docker.errors.DockerException,
- lambda: self.client.build('.', container_limits={
+ with pytest.raises(docker.errors.DockerException):
+ self.client.build('.', container_limits={
'foo': 'bar'
})
- )
def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
self.client._auth_configs = {
- 'https://example.com': {
- 'user': 'example',
- 'password': 'example',
- 'email': 'example@example.com'
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
}
}
headers = {}
expected_headers = {
- 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ )
+ }
+
self.client._set_auth_headers(headers)
- self.assertEqual(headers, expected_headers)
+ assert headers == expected_headers
def test_set_auth_headers_with_dict_and_auth_configs(self):
self.client._auth_configs = {
- 'https://example.com': {
- 'user': 'example',
- 'password': 'example',
- 'email': 'example@example.com'
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
}
}
headers = {'foo': 'bar'}
expected_headers = {
- 'foo': 'bar',
- 'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ ),
+ 'foo': 'bar'
+ }
self.client._set_auth_headers(headers)
- self.assertEqual(headers, expected_headers)
+ assert headers == expected_headers
def test_set_auth_headers_with_dict_and_no_auth_configs(self):
headers = {'foo': 'bar'}
@@ -161,4 +160,4 @@ class BuildTest(DockerClientTest):
}
self.client._set_auth_headers(headers)
- self.assertEqual(headers, expected_headers)
+ assert headers == expected_headers
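
The registry-auth changes above track the credential store's new shape: registries are nested under an 'auths' key, and only that sub-dict is serialized into the X-Registry-Config header. docker.auth.encode_header amounts to a JSON dump plus URL-safe base64; a minimal standalone sketch with dummy credentials:

import base64
import json

auth_configs = {
    'auths': {
        'https://example.com': {
            'user': 'example',
            'password': 'example',
            'email': 'example@example.com',
        }
    }
}

# Roughly equivalent to auth.encode_header(auth_configs['auths']).
header = base64.urlsafe_b64encode(
    json.dumps(auth_configs['auths']).encode('ascii')
)
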
diff --git a/tests/unit/container_test.py b/tests/unit/api_container_test.py
index db3dd74..c33f129 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/api_container_test.py
@@ -9,9 +9,9 @@ import pytest
import six
from . import fake_api
-from ..base import requires_api_version
+from ..helpers import requires_api_version
from .api_test import (
- DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
fake_inspect_container
)
@@ -25,63 +25,45 @@ def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
-class StartContainerTest(DockerClientTest):
+class StartContainerTest(BaseAPIClientTest):
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'containers/3cc2351ab11b/start'
- )
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/start'
+ assert 'data' not in args[1]
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_start_container_none(self):
with pytest.raises(ValueError) as excinfo:
self.client.start(container=None)
- self.assertEqual(
- str(excinfo.value),
- 'image or container param is undefined',
- )
+ assert str(excinfo.value) == 'Resource ID was not provided'
with pytest.raises(ValueError) as excinfo:
self.client.start(None)
- self.assertEqual(
- str(excinfo.value),
- 'image or container param is undefined',
- )
+ assert str(excinfo.value) == 'Resource ID was not provided'
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_start_container_with_lxc_conf(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_lxc_conf_compat(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_ro(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
@@ -91,22 +73,18 @@ class StartContainerTest(DockerClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_rw(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_port_binds(self):
self.maxDiff = None
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
@@ -116,18 +94,14 @@ class StartContainerTest(DockerClientTest):
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_multiple_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
@@ -136,54 +110,38 @@ class StartContainerTest(DockerClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links_as_list_of_tuples(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
- pytest.deprecated_call(call_start)
-
def test_start_container_privileged(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'containers/3cc2351ab11b/start'
- )
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/start'
+ assert 'data' not in args[1]
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
-class CreateContainerTest(DockerClientTest):
+class CreateContainerTest(BaseAPIClientTest):
def test_create_container(self):
self.client.create_container('busybox', 'true')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox", "Cmd": ["true"],
- "AttachStdin": false,
- "AttachStderr": true, "AttachStdout": true,
- "StdinOnce": false,
- "OpenStdin": false, "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": false,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": false,
+ "OpenStdin": false, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_binds(self):
mount_dest = '/mnt'
@@ -192,19 +150,17 @@ class CreateContainerTest(DockerClientTest):
volumes=[mount_dest])
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls", "/mnt"], "AttachStdin": false,
- "Volumes": {"/mnt": {}},
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls", "/mnt"], "AttachStdin": false,
+ "Volumes": {"/mnt": {}},
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
@@ -213,82 +169,56 @@ class CreateContainerTest(DockerClientTest):
volumes=mount_dest)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls", "/mnt"], "AttachStdin": false,
- "Volumes": {"/mnt": {}},
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls", "/mnt"], "AttachStdin": false,
+ "Volumes": {"/mnt": {}},
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_ports(self):
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "ExposedPorts": {
- "1111/tcp": {},
- "2222/udp": {},
- "3333/tcp": {}
- },
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "ExposedPorts": {
+ "1111/tcp": {},
+ "2222/udp": {},
+ "3333/tcp": {}
+ },
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_entrypoint(self):
self.client.create_container('busybox', 'hello',
entrypoint='cowsay entry')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["hello"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "Entrypoint": ["cowsay", "entry"]}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_with_cpu_shares(self):
- with pytest.deprecated_call():
- self.client.create_container('busybox', 'ls', cpu_shares=5)
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["hello"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "Entrypoint": ["cowsay", "entry"]}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
- args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "CpuShares": 5}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- @requires_api_version('1.18')
def test_create_container_with_host_config_cpu_shares(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
@@ -297,45 +227,22 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
-
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "CpuShares": 512,
- "NetworkMode": "default"
- }}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_with_cpuset(self):
- with pytest.deprecated_call():
- self.client.create_container('busybox', 'ls', cpuset='0,1')
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuShares": 512,
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
- args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "Cpuset": "0,1",
- "CpusetCpus": "0,1"}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- @requires_api_version('1.18')
def test_create_container_with_host_config_cpuset(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
@@ -344,23 +251,45 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
-
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "CpuSetCpus": "0,1",
- "NetworkMode": "default"
- }}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetCpus": "0,1",
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_host_config_cpuset_mems(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpuset_mems='0'
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetMems": "0",
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
@@ -370,90 +299,58 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
data = json.loads(args[1]['data'])
- self.assertIn('HostConfig', data)
- self.assertIn('CgroupParent', data['HostConfig'])
- self.assertEqual(data['HostConfig']['CgroupParent'], 'test')
+ assert 'HostConfig' in data
+ assert 'CgroupParent' in data['HostConfig']
+ assert data['HostConfig']['CgroupParent'] == 'test'
def test_create_container_with_working_dir(self):
self.client.create_container('busybox', 'ls',
working_dir='/root')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "WorkingDir": "/root"}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "WorkingDir": "/root"}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_stdin_open(self):
self.client.create_container('busybox', 'true', stdin_open=True)
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox", "Cmd": ["true"],
- "AttachStdin": true,
- "AttachStderr": true, "AttachStdout": true,
- "StdinOnce": true,
- "OpenStdin": true, "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_with_volumes_from(self):
- vol_names = ['foo', 'bar']
- try:
- self.client.create_container('busybox', 'true',
- volumes_from=vol_names)
- except docker.errors.DockerException:
- self.assertTrue(
- docker.utils.compare_version('1.10', self.client._version) >= 0
- )
- return
-
- args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
- ','.join(vol_names))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_create_container_empty_volumes_from(self):
- self.client.create_container('busybox', 'true', volumes_from=[])
-
- args = fake_request.call_args
- data = json.loads(args[1]['data'])
- self.assertTrue('VolumesFrom' not in data)
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": true,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": true,
+ "OpenStdin": true, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox", "Cmd": ["true"],
- "AttachStdin": false,
- "AttachStderr": true, "AttachStdout": true,
- "StdinOnce": false,
- "OpenStdin": false, "NetworkDisabled": false}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": false,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": false,
+ "OpenStdin": false, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['params'] == {'name': 'marisa-kirisame'}
def test_create_container_with_mem_limit_as_int(self):
self.client.create_container(
@@ -464,7 +361,7 @@ class CreateContainerTest(DockerClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0)
+ assert data['HostConfig']['Memory'] == 128.0
def test_create_container_with_mem_limit_as_string(self):
self.client.create_container(
@@ -475,7 +372,7 @@ class CreateContainerTest(DockerClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0)
+ assert data['HostConfig']['Memory'] == 128.0
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
self.client.create_container(
@@ -486,7 +383,7 @@ class CreateContainerTest(DockerClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
+ assert data['HostConfig']['Memory'] == 128.0 * 1024
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
self.client.create_container(
@@ -497,7 +394,7 @@ class CreateContainerTest(DockerClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
+ assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
self.client.create_container(
@@ -508,20 +405,14 @@ class CreateContainerTest(DockerClientTest):
args = fake_request.call_args
data = json.loads(args[1]['data'])
- self.assertEqual(
- data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
- )
+ assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024 * 1024
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
- self.assertRaises(
- docker.errors.DockerException,
- self.client.create_host_config, mem_limit='128p'
- )
+ with pytest.raises(docker.errors.DockerException):
+ self.client.create_host_config(mem_limit='128p')
- self.assertRaises(
- docker.errors.DockerException,
- self.client.create_host_config, mem_limit='1f28'
- )
+ with pytest.raises(docker.errors.DockerException):
+ self.client.create_host_config(mem_limit='1f28')
def test_create_container_with_lxc_conf(self):
self.client.create_container(
@@ -531,25 +422,16 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'containers/create'
- )
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'],
- {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_lxc_conf_compat(self):
self.client.create_container(
@@ -559,20 +441,15 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
- self.assertEqual(
- json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_ro(self):
mount_dest = '/mnt'
@@ -588,18 +465,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_rw(self):
mount_dest = '/mnt'
@@ -615,18 +487,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_mode(self):
mount_dest = '/mnt'
@@ -642,18 +509,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_binds_mode_and_ro_error(self):
with pytest.raises(ValueError):
@@ -680,21 +542,16 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_port_binds(self):
self.maxDiff = None
@@ -713,42 +570,31 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
- self.assertTrue('1111/tcp' in port_bindings)
- self.assertTrue('2222/tcp' in port_bindings)
- self.assertTrue('3333/udp' in port_bindings)
- self.assertTrue('4444/tcp' in port_bindings)
- self.assertTrue('5555/tcp' in port_bindings)
- self.assertTrue('6666/tcp' in port_bindings)
- self.assertEqual(
- [{"HostPort": "", "HostIp": ""}],
- port_bindings['1111/tcp']
- )
- self.assertEqual(
- [{"HostPort": "2222", "HostIp": ""}],
- port_bindings['2222/tcp']
- )
- self.assertEqual(
- [{"HostPort": "3333", "HostIp": ""}],
- port_bindings['3333/udp']
- )
- self.assertEqual(
- [{"HostPort": "", "HostIp": "127.0.0.1"}],
- port_bindings['4444/tcp']
- )
- self.assertEqual(
- [{"HostPort": "5555", "HostIp": "127.0.0.1"}],
- port_bindings['5555/tcp']
- )
- self.assertEqual(len(port_bindings['6666/tcp']), 2)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert '1111/tcp' in port_bindings
+ assert '2222/tcp' in port_bindings
+ assert '3333/udp' in port_bindings
+ assert '4444/tcp' in port_bindings
+ assert '5555/tcp' in port_bindings
+ assert '6666/tcp' in port_bindings
+ assert [{"HostPort": "", "HostIp": ""}] == port_bindings['1111/tcp']
+ assert [
+ {"HostPort": "2222", "HostIp": ""}
+ ] == port_bindings['2222/tcp']
+ assert [
+ {"HostPort": "3333", "HostIp": ""}
+ ] == port_bindings['3333/udp']
+ assert [
+ {"HostPort": "", "HostIp": "127.0.0.1"}
+ ] == port_bindings['4444/tcp']
+ assert [
+ {"HostPort": "5555", "HostIp": "127.0.0.1"}
+ ] == port_bindings['5555/tcp']
+ assert len(port_bindings['6666/tcp']) == 2
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_mac_address(self):
expected = "02:42:ac:11:00:0a"
@@ -760,7 +606,7 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
data = json.loads(args[1]['data'])
assert data['MacAddress'] == expected
@@ -775,17 +621,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'containers/create'
- )
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_multiple_links(self):
link_path = 'path'
@@ -801,16 +643,14 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_with_links_as_list_of_tuples(self):
link_path = 'path'
@@ -823,15 +663,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
def test_create_container_privileged(self):
self.client.create_container(
@@ -843,14 +681,10 @@ class CreateContainerTest(DockerClientTest):
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_restart_policy(self):
self.client.create_container(
@@ -863,21 +697,17 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
+ assert json.loads(args[1]['data']) == expected_payload
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_added_capabilities(self):
self.client.create_container(
@@ -886,17 +716,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_dropped_capabilities(self):
self.client.create_container(
@@ -905,17 +731,13 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_devices(self):
self.client.create_container(
@@ -927,7 +749,7 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Devices'] = [
@@ -941,13 +763,9 @@ class CreateContainerTest(DockerClientTest):
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_labels_dict(self):
labels_dict = {
@@ -961,14 +779,10 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Labels'] == labels_dict
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_labels_list(self):
labels_list = [
@@ -986,14 +800,10 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Labels'] == labels_dict
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_named_volume(self):
mount_dest = '/mnt'
@@ -1002,47 +812,39 @@ class CreateContainerTest(DockerClientTest):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
+ volume_driver='foodriver',
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
- volume_driver='foodriver',
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'containers/create'
- )
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
- expected_payload['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_stop_signal(self):
self.client.create_container('busybox', 'ls',
stop_signal='SIGINT')
args = fake_request.call_args
- self.assertEqual(args[0][1],
- url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "StopSignal": "SIGINT"}'''))
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "StopSignal": "SIGINT"}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
@requires_api_version('1.22')
def test_create_container_with_aliases(self):
@@ -1059,22 +861,22 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(json.loads(args[1]['data']),
- json.loads('''
- {"Tty": false, "Image": "busybox",
- "Cmd": ["ls"], "AttachStdin": false,
- "AttachStderr": true,
- "AttachStdout": true, "OpenStdin": false,
- "StdinOnce": false,
- "NetworkDisabled": false,
- "HostConfig": {
- "NetworkMode": "some-network"
- },
- "NetworkingConfig": {
- "EndpointsConfig": {
- "some-network": {"Aliases": ["foo", "bar"]}
- }
- }}'''))
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "NetworkMode": "some-network"
+ },
+ "NetworkingConfig": {
+ "EndpointsConfig": {
+ "some-network": {"Aliases": ["foo", "bar"]}
+ }
+ }}
+ ''')
@requires_api_version('1.22')
def test_create_container_with_tmpfs_list(self):
@@ -1089,21 +891,16 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
@requires_api_version('1.22')
def test_create_container_with_tmpfs_dict(self):
@@ -1118,21 +915,16 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix +
- 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
- self.assertEqual(
- args[1]['timeout'],
- DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
@requires_api_version('1.24')
def test_create_container_with_sysctl(self):
@@ -1147,19 +939,15 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
+ assert args[0][1] == url_prefix + 'containers/create'
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Sysctls'] = {
'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
}
- self.assertEqual(json.loads(args[1]['data']), expected_payload)
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
- self.assertEqual(
- args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
- )
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
def test_create_container_with_unicode_envvars(self):
envvars_dict = {
@@ -1176,11 +964,40 @@ class CreateContainerTest(DockerClientTest):
)
args = fake_request.call_args
- self.assertEqual(args[0][1], url_prefix + 'containers/create')
- self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Env'] == expected
+ @requires_api_version('1.25')
+ def test_create_container_with_host_config_cpus(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpu_count=1,
+ cpu_percent=20,
+ nano_cpus=1000
+ )
+ )
-class ContainerTest(DockerClientTest):
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuCount": 1,
+ "CpuPercent": 20,
+ "NanoCpus": 1000,
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+
+class ContainerTest(BaseAPIClientTest):
def test_list_containers(self):
self.client.containers(all=True)
@@ -1231,7 +1048,8 @@ class ContainerTest(DockerClientTest):
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
- timeout=None
+ timeout=None,
+ params={}
)
def test_wait_with_dict_instead_of_id(self):
@@ -1240,11 +1058,12 @@ class ContainerTest(DockerClientTest):
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
- timeout=None
+ timeout=None,
+ params={}
)
def test_logs(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
@@ -1257,13 +1076,10 @@ class ContainerTest(DockerClientTest):
stream=False
)
- self.assertEqual(
- logs,
- 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
- )
+ assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
def test_logs_with_dict_instead_of_id(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
@@ -1276,13 +1092,10 @@ class ContainerTest(DockerClientTest):
stream=False
)
- self.assertEqual(
- logs,
- 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
- )
+ assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
def test_log_streaming(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=False)
@@ -1297,7 +1110,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_following(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=True)
@@ -1312,7 +1125,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_following_backwards(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
@@ -1326,7 +1139,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_streaming_and_following(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=True)
@@ -1342,7 +1155,7 @@ class ContainerTest(DockerClientTest):
def test_log_tail(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, tail=10)
@@ -1358,7 +1171,7 @@ class ContainerTest(DockerClientTest):
def test_log_since(self):
ts = 809222400
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=ts)
@@ -1375,7 +1188,7 @@ class ContainerTest(DockerClientTest):
def test_log_since_with_datetime(self):
ts = 809222400
time = datetime.datetime.utcfromtimestamp(ts)
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=time)
@@ -1389,16 +1202,23 @@ class ContainerTest(DockerClientTest):
stream=False
)
+ def test_log_since_with_invalid_value_raises_error(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ with pytest.raises(docker.errors.InvalidArgument):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=False, since=42.42)
+
def test_log_tty(self):
m = mock.Mock()
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container_tty):
- with mock.patch('docker.Client._stream_raw_result',
+ with mock.patch('docker.api.client.APIClient._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
follow=True, stream=True)
- self.assertTrue(m.called)
+ assert m.called
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
@@ -1582,9 +1402,7 @@ class ContainerTest(DockerClientTest):
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_container(arg)
- self.assertEqual(
- excinfo.value.args[0], 'image or container param is undefined'
- )
+ assert excinfo.value.args[0] == 'Resource ID was not provided'
def test_container_stats(self):
self.client.stats(fake_api.FAKE_CONTAINER_ID)
@@ -1623,13 +1441,8 @@ class ContainerTest(DockerClientTest):
blkio_weight=345
)
args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'containers/3cc2351ab11b/update'
- )
- self.assertEqual(
- json.loads(args[1]['data']),
- {'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345}
- )
- self.assertEqual(
- args[1]['headers']['Content-Type'], 'application/json'
- )
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/update'
+ assert json.loads(args[1]['data']) == {
+ 'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345
+ }
+ assert args[1]['headers']['Content-Type'] == 'application/json'
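The hunks above continue the file-wide migration from unittest's assertEqual/assertTrue helpers to bare assert statements. A minimal, self-contained sketch of the before/after; the class and payload below are illustrative, not docker-py code:

import json
import unittest


class AssertionStyleExample(unittest.TestCase):
    payload = json.dumps({'Memory': 2 * 1024, 'CpuShares': 124})

    def test_unittest_style(self):
        # Old style: TestCase helper methods build the failure message.
        self.assertEqual(json.loads(self.payload)['CpuShares'], 124)

    def test_pytest_style(self):
        # New style: pytest rewrites bare asserts to show both operands,
        # so the helper methods (and their indentation tax) can go.
        assert json.loads(self.payload)['CpuShares'] == 124


if __name__ == '__main__':
    unittest.main()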
diff --git a/tests/unit/api_exec_test.py b/tests/unit/api_exec_test.py
new file mode 100644
index 0000000..a9d2dd5
--- /dev/null
+++ b/tests/unit/api_exec_test.py
@@ -0,0 +1,83 @@
+import json
+
+from . import fake_api
+from .api_test import (
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+)
+
+
+class ExecTest(BaseAPIClientTest):
+ def test_exec_create(self):
+ self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
+
+ args = fake_request.call_args
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'containers/{0}/exec'.format(
+ fake_api.FAKE_CONTAINER_ID
+ )
+
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'AttachStdout': True,
+ 'Container': fake_api.FAKE_CONTAINER_ID,
+ 'Cmd': ['ls', '-1'],
+ 'Privileged': False,
+ 'AttachStdin': False,
+ 'AttachStderr': True,
+ 'User': ''
+ }
+
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_exec_start(self):
+ self.client.exec_start(fake_api.FAKE_EXEC_ID)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'Detach': False,
+ }
+
+ assert args[1]['headers'] == {
+ 'Content-Type': 'application/json',
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ def test_exec_start_detached(self):
+ self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'Detach': True
+ }
+
+ assert args[1]['headers'] == {
+ 'Content-Type': 'application/json'
+ }
+
+ def test_exec_inspect(self):
+ self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'exec/{0}/json'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+
+ def test_exec_resize(self):
+ self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
+ params={'h': 20, 'w': 60},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
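The new exec tests follow the suite's standard pattern: the transport is mocked out, and each test replays a client call and then inspects fake_request.call_args. A hedged sketch of that pattern with a stand-in exec_create; all names below are illustrative, not docker-py's:

import json
from unittest import mock

fake_request = mock.Mock()


def exec_create(container_id, cmd):
    # Hypothetical stand-in for APIClient.exec_create: it only records
    # the POST it would have sent, just like the patched transport.
    data = json.dumps({'Container': container_id, 'Cmd': cmd})
    fake_request('POST', '/exec/create', data=data,
                 headers={'Content-Type': 'application/json'})


exec_create('3cc2351ab11b', ['ls', '-1'])
args = fake_request.call_args
assert args[0] == ('POST', '/exec/create')                 # positional args
assert json.loads(args[1]['data'])['Cmd'] == ['ls', '-1']  # keyword args
assert args[1]['headers'] == {'Content-Type': 'application/json'}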
diff --git a/tests/unit/image_test.py b/tests/unit/api_image_test.py
index b2b1dd6..1e2315d 100644
--- a/tests/unit/image_test.py
+++ b/tests/unit/api_image_test.py
@@ -4,7 +4,7 @@ import pytest
from . import fake_api
from docker import auth
from .api_test import (
- DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
+ BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
fake_resolve_authconfig
)
@@ -14,7 +14,7 @@ except ImportError:
import mock
-class ImageTest(DockerClientTest):
+class ImageTest(BaseAPIClientTest):
def test_image_viz(self):
with pytest.raises(Exception):
self.client.images('busybox', viz=True)
@@ -65,29 +65,21 @@ class ImageTest(DockerClientTest):
self.client.pull('joffrey/test001')
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'images/create'
- )
- self.assertEqual(
- args[1]['params'],
- {'tag': None, 'fromImage': 'joffrey/test001'}
- )
- self.assertFalse(args[1]['stream'])
+ assert args[0][1] == url_prefix + 'images/create'
+ assert args[1]['params'] == {
+ 'tag': None, 'fromImage': 'joffrey/test001'
+ }
+ assert not args[1]['stream']
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
- self.assertEqual(
- args[0][1],
- url_prefix + 'images/create'
- )
- self.assertEqual(
- args[1]['params'],
- {'tag': None, 'fromImage': 'joffrey/test001'}
- )
- self.assertTrue(args[1]['stream'])
+ assert args[0][1] == url_prefix + 'images/create'
+ assert args[1]['params'] == {
+ 'tag': None, 'fromImage': 'joffrey/test001'
+ }
+ assert args[1]['stream']
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
@@ -203,32 +195,10 @@ class ImageTest(DockerClientTest):
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
- self.assertEqual(
- excinfo.value.args[0], 'image or container param is undefined'
- )
-
- def test_insert_image(self):
- try:
- self.client.insert(fake_api.FAKE_IMAGE_NAME,
- fake_api.FAKE_URL, fake_api.FAKE_PATH)
- except docker.errors.DeprecatedMethod:
- self.assertTrue(
- docker.utils.compare_version('1.12', self.client._version) >= 0
- )
- return
-
- fake_request.assert_called_with(
- 'POST',
- url_prefix + 'images/test_image/insert',
- params={
- 'url': fake_api.FAKE_URL,
- 'path': fake_api.FAKE_PATH
- },
- timeout=DEFAULT_TIMEOUT_SECONDS
- )
+ assert excinfo.value.args[0] == 'Resource ID was not provided'
def test_push_image(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
@@ -245,7 +215,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image_with_tag(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
@@ -271,9 +241,9 @@ class ImageTest(DockerClientTest):
}
encoded_auth = auth.encode_header(auth_config)
self.client.push(
- fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
- auth_config=auth_config
- )
+ fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
+ auth_config=auth_config
+ )
fake_request.assert_called_with(
'POST',
@@ -289,7 +259,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image_stream(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
@@ -369,5 +339,19 @@ class ImageTest(DockerClientTest):
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
+ stream=True,
+ params={},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_load_image_quiet(self):
+ self.client.load_image('Byte Stream....', quiet=True)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/load',
+ data='Byte Stream....',
+ stream=True,
+ params={'quiet': True},
timeout=DEFAULT_TIMEOUT_SECONDS
)
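The push tests above rely on auth.encode_header to turn the credential dict into an X-Registry-Auth header value. A sketch of the idea, assuming JSON plus base64url encoding as in docker.auth; details of the real helper may differ:

import base64
import json


def encode_header(auth_config):
    # JSON-encode the credentials, then base64url-encode the result so
    # the value is safe to place in an HTTP header.
    return base64.urlsafe_b64encode(json.dumps(auth_config).encode('utf-8'))


header = {'X-Registry-Auth': encode_header(
    {'username': 'sakuya', 'password': 'izayoi'}
)}
decoded = json.loads(
    base64.urlsafe_b64decode(header['X-Registry-Auth']).decode('utf-8')
)
assert decoded['username'] == 'sakuya'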
diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py
new file mode 100644
index 0000000..c78554d
--- /dev/null
+++ b/tests/unit/api_network_test.py
@@ -0,0 +1,169 @@
+import json
+
+import six
+
+from .api_test import BaseAPIClientTest, url_prefix, response
+from docker.types import IPAMConfig, IPAMPool
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class NetworkTest(BaseAPIClientTest):
+ def test_list_networks(self):
+ networks = [
+ {
+ "name": "none",
+ "id": "8e4e55c6863ef424",
+ "type": "null",
+ "endpoints": []
+ },
+ {
+ "name": "host",
+ "id": "062b6d9ea7913fde",
+ "type": "host",
+ "endpoints": []
+ },
+ ]
+
+ get = mock.Mock(return_value=response(
+ status_code=200, content=json.dumps(networks).encode('utf-8')))
+
+ with mock.patch('docker.api.client.APIClient.get', get):
+ assert self.client.networks() == networks
+
+ assert get.call_args[0][0] == url_prefix + 'networks'
+
+ filters = json.loads(get.call_args[1]['params']['filters'])
+ assert not filters
+
+ self.client.networks(names=['foo'])
+ filters = json.loads(get.call_args[1]['params']['filters'])
+ assert filters == {'name': ['foo']}
+
+ self.client.networks(ids=['123'])
+ filters = json.loads(get.call_args[1]['params']['filters'])
+ assert filters == {'id': ['123']}
+
+ def test_create_network(self):
+ network_data = {
+ "id": 'abc12345',
+ "warning": "",
+ }
+
+ network_response = response(status_code=200, content=network_data)
+ post = mock.Mock(return_value=network_response)
+
+ with mock.patch('docker.api.client.APIClient.post', post):
+ result = self.client.create_network('foo')
+ assert result == network_data
+
+ assert post.call_args[0][0] == url_prefix + 'networks/create'
+
+ assert json.loads(post.call_args[1]['data']) == {"Name": "foo"}
+
+ opts = {
+ 'com.docker.network.bridge.enable_icc': False,
+ 'com.docker.network.bridge.enable_ip_masquerade': False,
+ }
+ self.client.create_network('foo', 'bridge', opts)
+
+ assert json.loads(post.call_args[1]['data']) == {
+ "Name": "foo", "Driver": "bridge", "Options": opts
+ }
+
+ ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
+ gateway="192.168.52.254")
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool_config])
+
+ self.client.create_network("bar", driver="bridge",
+ ipam=ipam_config)
+
+ assert json.loads(post.call_args[1]['data']) == {
+ "Name": "bar",
+ "Driver": "bridge",
+ "IPAM": {
+ "Driver": "default",
+ "Config": [{
+ "IPRange": None,
+ "Gateway": "192.168.52.254",
+ "Subnet": "192.168.52.0/24",
+ "AuxiliaryAddresses": None,
+ }],
+ }
+ }
+
+ def test_remove_network(self):
+ network_id = 'abc12345'
+ delete = mock.Mock(return_value=response(status_code=200))
+
+ with mock.patch('docker.api.client.APIClient.delete', delete):
+ self.client.remove_network(network_id)
+
+ args = delete.call_args
+ assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+
+ def test_inspect_network(self):
+ network_id = 'abc12345'
+ network_name = 'foo'
+ network_data = {
+ six.u('name'): network_name,
+ six.u('id'): network_id,
+ six.u('driver'): 'bridge',
+ six.u('containers'): {},
+ }
+
+ network_response = response(status_code=200, content=network_data)
+ get = mock.Mock(return_value=network_response)
+
+ with mock.patch('docker.api.client.APIClient.get', get):
+ result = self.client.inspect_network(network_id)
+ assert result == network_data
+
+ args = get.call_args
+ assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+
+ def test_connect_container_to_network(self):
+ network_id = 'abc12345'
+ container_id = 'def45678'
+
+ post = mock.Mock(return_value=response(status_code=201))
+
+ with mock.patch('docker.api.client.APIClient.post', post):
+ self.client.connect_container_to_network(
+ container={'Id': container_id},
+ net_id=network_id,
+ aliases=['foo', 'bar'],
+ links=[('baz', 'quux')]
+ )
+
+ assert post.call_args[0][0] == (
+ url_prefix + 'networks/{0}/connect'.format(network_id)
+ )
+
+ assert json.loads(post.call_args[1]['data']) == {
+ 'Container': container_id,
+ 'EndpointConfig': {
+ 'Aliases': ['foo', 'bar'],
+ 'Links': ['baz:quux'],
+ },
+ }
+
+ def test_disconnect_container_from_network(self):
+ network_id = 'abc12345'
+ container_id = 'def45678'
+
+ post = mock.Mock(return_value=response(status_code=201))
+
+ with mock.patch('docker.api.client.APIClient.post', post):
+ self.client.disconnect_container_from_network(
+ container={'Id': container_id}, net_id=network_id)
+
+ assert post.call_args[0][0] == (
+ url_prefix + 'networks/{0}/disconnect'.format(network_id)
+ )
+ assert json.loads(post.call_args[1]['data']) == {
+ 'Container': container_id
+ }
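Unlike the container tests, NetworkTest patches individual verb methods (get/post/delete) with plain Mocks that return a canned response object. A self-contained sketch of the same technique; FakeResponse and MiniClient are stand-ins, not docker-py classes:

import json
from unittest import mock


class FakeResponse(object):
    def __init__(self, status_code, content):
        self.status_code = status_code
        self.content = content


class MiniClient(object):
    def get(self, url, **kwargs):
        # Real transport; replaced by a Mock in the block below.
        raise RuntimeError('unit tests must not hit the network')

    def networks(self):
        return json.loads(self.get('/networks').content.decode('utf-8'))


networks = [{'name': 'none', 'id': '8e4e55c6863ef424'}]
get = mock.Mock(return_value=FakeResponse(
    status_code=200, content=json.dumps(networks).encode('utf-8')))

with mock.patch.object(MiniClient, 'get', get):
    assert MiniClient().networks() == networks

# A plain Mock does not bind `self`, so the URL is the first positional arg.
assert get.call_args[0][0] == '/networks'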
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index 8faca6b..46cbd68 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -1,21 +1,21 @@
import datetime
import json
+import io
import os
import re
import shutil
import socket
-import sys
import tempfile
import threading
import time
-import io
+import unittest
import docker
+from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
-from .. import base
from . import fake_api
import pytest
@@ -83,24 +83,28 @@ def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
-def fake_read_from_socket(self, response, stream):
+def fake_read_from_socket(self, response, stream, tty=False):
return six.binary_type()
+
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
-class DockerClientTest(base.Cleanup, base.BaseTestCase):
+class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
- 'docker.Client', get=fake_get, post=fake_post, put=fake_put,
+ 'docker.api.client.APIClient',
+ get=fake_get,
+ post=fake_post,
+ put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
- self.client = docker.Client()
+ self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
@@ -108,11 +112,6 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
self.client.close()
self.patcher.stop()
- def assertIn(self, object, collection):
- if six.PY2 and sys.version_info[1] <= 6:
- return self.assertTrue(object in collection)
- return super(DockerClientTest, self).assertIn(object, collection)
-
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
@@ -124,39 +123,32 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
}
-class DockerApiTest(DockerClientTest):
+class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
- docker.Client(version=1.12)
+ APIClient(version=1.12)
- self.assertEqual(
- str(excinfo.value),
- 'Version parameter must be a string or None. Found float'
- )
+ assert str(
+ excinfo.value
+ ) == 'Version parameter must be a string or None. Found float'
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
- self.assertEqual(
- url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
- )
+ assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
- self.assertEqual(
- url,
- '{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
+ assert url == '{0}{1}'.format(
+ url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
- self.assertEqual(
- url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
- )
+ assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
- self.assertEqual(
- url,
- '{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
+ assert url == '{0}{1}'.format(
+ url_prefix, 'images/localhost:5000/image/push'
)
def test_url_invalid_resource(self):
@@ -165,15 +157,13 @@ class DockerApiTest(DockerClientTest):
def test_url_no_resource(self):
url = self.client._url('/simple')
- self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
+ assert url == '{0}{1}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
- self.assertEqual(
- url, '{0}{1}'.format(url_base, 'hello/somename/world')
- )
+ assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
@@ -194,14 +184,14 @@ class DockerApiTest(DockerClientTest):
)
def test_retrieve_server_version(self):
- client = docker.Client(version="auto")
- self.assertTrue(isinstance(client._version, six.string_types))
- self.assertFalse(client._version == "auto")
+ client = APIClient(version="auto")
+ assert isinstance(client._version, six.string_types)
+ assert client._version != "auto"
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
- self.assertTrue(isinstance(version, six.string_types))
+ assert isinstance(version, six.string_types)
def test_info(self):
self.client.info()
@@ -222,6 +212,24 @@ class DockerApiTest(DockerClientTest):
timeout=DEFAULT_TIMEOUT_SECONDS
)
+ def test_login(self):
+ self.client.login('sakuya', 'izayoi')
+ args = fake_request.call_args
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'auth'
+ assert json.loads(args[1]['data']) == {
+ 'username': 'sakuya', 'password': 'izayoi'
+ }
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert self.client._auth_configs['auths'] == {
+ 'docker.io': {
+ 'email': None,
+ 'password': 'izayoi',
+ 'username': 'sakuya',
+ 'serveraddress': None,
+ }
+ }
+
def test_events(self):
self.client.events()
@@ -229,7 +237,8 @@ class DockerApiTest(DockerClientTest):
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
- stream=True
+ stream=True,
+ timeout=None
)
def test_events_with_since_until(self):
@@ -248,7 +257,8 @@ class DockerApiTest(DockerClientTest):
'until': ts + 10,
'filters': None
},
- stream=True
+ stream=True,
+ timeout=None
)
def test_events_with_filters(self):
@@ -266,7 +276,8 @@ class DockerApiTest(DockerClientTest):
'until': None,
'filters': expected_filters
},
- stream=True
+ stream=True,
+ timeout=None
)
def _socket_path_for_client_session(self, client):
@@ -274,27 +285,27 @@ class DockerApiTest(DockerClientTest):
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
- c = docker.Client(base_url="unix://socket")
+ c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
- c = docker.Client(base_url="unix:///socket")
+ c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
- c = docker.Client(base_url="http+unix:///socket")
+ c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
- c = docker.Client(base_url="http://hostname:1234")
+ c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
- c = docker.Client(base_url="tcp://hostname:1234")
+ c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
@@ -311,11 +322,10 @@ class DockerApiTest(DockerClientTest):
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
- self.assertIn('SecurityOpt', result)
- self.assertEqual(result['SecurityOpt'], security_opt)
- self.assertRaises(
- TypeError, self.client.create_host_config, security_opt='wrong'
- )
+ assert 'SecurityOpt' in result
+ assert result['SecurityOpt'] == security_opt
+ with pytest.raises(TypeError):
+ self.client.create_host_config(security_opt='wrong')
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
@@ -327,35 +337,35 @@ class DockerApiTest(DockerClientTest):
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
- setattr(raw_resp._fp, 'chunk_left', len(body.getvalue())-1)
+ setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
- self.assertEqual(result, content_str)
+ assert result == content_str
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
- self.assertEqual(result, content)
+ assert result == content
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
- self.assertEqual(result, content_str.decode('utf-8'))
+ assert result == content_str.decode('utf-8')
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
- self.assertEqual(result, content)
+ assert result == content
-class StreamTest(base.Cleanup, base.BaseTestCase):
+class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
@@ -440,27 +450,25 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
b'\r\n'
) + b'\r\n'.join(lines)
- with docker.Client(base_url="http+unix://" + self.socket_file) \
- as client:
+ with APIClient(base_url="http+unix://" + self.socket_file) as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
- stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
- self.assertEqual(list(stream), [
- str(i).encode() for i in range(50)])
+ assert list(stream) == [
+ str(i).encode() for i in range(50)]
-class UserAgentTest(base.BaseTestCase):
+class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
- docker.Client,
+ APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
@@ -470,18 +478,62 @@ class UserAgentTest(base.BaseTestCase):
self.patcher.stop()
def test_default_user_agent(self):
- client = docker.Client()
+ client = APIClient()
client.version()
- self.assertEqual(self.mock_send.call_count, 1)
+ assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
- expected = 'docker-py/%s' % docker.__version__
- self.assertEqual(headers['User-Agent'], expected)
+ expected = 'docker-sdk-python/%s' % docker.__version__
+ assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
- client = docker.Client(user_agent='foo/bar')
+ client = APIClient(user_agent='foo/bar')
client.version()
- self.assertEqual(self.mock_send.call_count, 1)
+ assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
- self.assertEqual(headers['User-Agent'], 'foo/bar')
+ assert headers['User-Agent'] == 'foo/bar'
+
+
+class DisableSocketTest(unittest.TestCase):
+ class DummySocket(object):
+ def __init__(self, timeout=60):
+ self.timeout = timeout
+
+ def settimeout(self, timeout):
+ self.timeout = timeout
+
+ def gettimeout(self):
+ return self.timeout
+
+ def setUp(self):
+ self.client = APIClient()
+
+ def test_disable_socket_timeout(self):
+ """Test that the timeout is disabled on a generic socket object."""
+ socket = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ assert socket.timeout is None
+
+ def test_disable_socket_timeout2(self):
+ """Test that the timeouts are disabled on a generic socket object
+ and its _sock object if present."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ assert socket.timeout is None
+ assert socket._sock.timeout is None
+
+ def test_disable_socket_timeout_non_blocking(self):
+ """Test that a non-blocking socket does not get set to blocking."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket(0.0)
+
+ self.client._disable_socket_timeout(socket)
+
+ assert socket.timeout is None
+ assert socket._sock.timeout == 0.0
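DisableSocketTest (moved here from client_test.py) exercises the timeout-disabling logic through a duck-typed DummySocket: anything with gettimeout/settimeout will do. The helper below is a simplified restatement of the behaviour the three tests pin down, not the source of APIClient._disable_socket_timeout:

class DummySocket(object):
    def __init__(self, timeout=60):
        self.timeout = timeout

    def settimeout(self, timeout):
        self.timeout = timeout

    def gettimeout(self):
        return self.timeout


def disable_socket_timeout(sock):
    # Clear the timeout on the socket and on a wrapped _sock if present,
    # but leave non-blocking sockets (timeout == 0.0) alone.
    for s in (sock, getattr(sock, '_sock', None)):
        if s is not None and s.gettimeout() != 0.0:
            s.settimeout(None)


outer = DummySocket()
outer._sock = DummySocket(0.0)
disable_socket_timeout(outer)
assert outer.timeout is None
assert outer._sock.timeout == 0.0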
diff --git a/tests/unit/volume_test.py b/tests/unit/api_volume_test.py
index 136d11a..7850c22 100644
--- a/tests/unit/volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -2,22 +2,20 @@ import json
import pytest
-from .. import base
-from .api_test import DockerClientTest, url_prefix, fake_request
+from ..helpers import requires_api_version
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
-class VolumeTest(DockerClientTest):
- @base.requires_api_version('1.21')
+class VolumeTest(BaseAPIClientTest):
def test_list_volumes(self):
volumes = self.client.volumes()
- self.assertIn('Volumes', volumes)
- self.assertEqual(len(volumes['Volumes']), 2)
+ assert 'Volumes' in volumes
+ assert len(volumes['Volumes']) == 2
args = fake_request.call_args
- self.assertEqual(args[0][0], 'GET')
- self.assertEqual(args[0][1], url_prefix + 'volumes')
+ assert args[0][0] == 'GET'
+ assert args[0][1] == url_prefix + 'volumes'
- @base.requires_api_version('1.21')
def test_list_volumes_and_filters(self):
volumes = self.client.volumes(filters={'dangling': True})
assert 'Volumes' in volumes
@@ -29,50 +27,47 @@ class VolumeTest(DockerClientTest):
assert args[1] == {'params': {'filters': '{"dangling": ["true"]}'},
'timeout': 60}
- @base.requires_api_version('1.21')
def test_create_volume(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name)
- self.assertIn('Name', result)
- self.assertEqual(result['Name'], name)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
args = fake_request.call_args
- self.assertEqual(args[0][0], 'POST')
- self.assertEqual(args[0][1], url_prefix + 'volumes/create')
- self.assertEqual(json.loads(args[1]['data']), {'Name': name})
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'volumes/create'
+ assert json.loads(args[1]['data']) == {'Name': name}
- @base.requires_api_version('1.23')
+ @requires_api_version('1.23')
def test_create_volume_with_labels(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name, labels={
- 'com.example.some-label': 'some-value'})
- self.assertEqual(
- result["Labels"],
- {'com.example.some-label': 'some-value'}
- )
+ 'com.example.some-label': 'some-value'
+ })
+ assert result["Labels"] == {
+ 'com.example.some-label': 'some-value'
+ }
- @base.requires_api_version('1.23')
+ @requires_api_version('1.23')
def test_create_volume_with_invalid_labels(self):
name = 'perfectcherryblossom'
with pytest.raises(TypeError):
self.client.create_volume(name, labels=1)
- @base.requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'
driver_name = 'sshfs'
self.client.create_volume(name, driver=driver_name)
args = fake_request.call_args
- self.assertEqual(args[0][0], 'POST')
- self.assertEqual(args[0][1], url_prefix + 'volumes/create')
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'volumes/create'
data = json.loads(args[1]['data'])
- self.assertIn('Driver', data)
- self.assertEqual(data['Driver'], driver_name)
+ assert 'Driver' in data
+ assert data['Driver'] == driver_name
- @base.requires_api_version('1.21')
def test_create_volume_invalid_opts_type(self):
with pytest.raises(TypeError):
self.client.create_volume(
@@ -89,24 +84,32 @@ class VolumeTest(DockerClientTest):
'perfectcherryblossom', driver_opts=''
)
- @base.requires_api_version('1.21')
+ @requires_api_version('1.24')
+ def test_create_volume_with_no_specified_name(self):
+ result = self.client.create_volume(name=None)
+ assert 'Name' in result
+ assert result['Name'] is not None
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
+ assert 'Scope' in result
+ assert result['Scope'] == 'local'
+
def test_inspect_volume(self):
name = 'perfectcherryblossom'
result = self.client.inspect_volume(name)
- self.assertIn('Name', result)
- self.assertEqual(result['Name'], name)
- self.assertIn('Driver', result)
- self.assertEqual(result['Driver'], 'local')
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
args = fake_request.call_args
- self.assertEqual(args[0][0], 'GET')
- self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
+ assert args[0][0] == 'GET'
+ assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
- @base.requires_api_version('1.21')
def test_remove_volume(self):
name = 'perfectcherryblossom'
self.client.remove_volume(name)
args = fake_request.call_args
- self.assertEqual(args[0][0], 'DELETE')
- self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
+ assert args[0][0] == 'DELETE'
+ assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
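The volume tests now import requires_api_version from tests/helpers.py instead of tests/base.py, and the blanket 1.21 gates are dropped. A hedged sketch of what such a skip decorator can look like; the real helper may differ, and DOCKER_TEST_API_VERSION is an assumed knob here:

import os
import unittest


def requires_api_version(version):
    # Compare dotted versions component-wise; float('1.21') < float('1.3')
    # would get this wrong.
    def as_tuple(v):
        return tuple(int(part) for part in v.split('.'))

    tested = os.environ.get('DOCKER_TEST_API_VERSION', '1.24')
    return unittest.skipIf(
        as_tuple(tested) < as_tuple(version),
        'API version is too low (< {0})'.format(version)
    )


class Example(unittest.TestCase):
    @requires_api_version('1.23')
    def test_labels(self):
        pass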
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index f395133..ee32ca0 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -7,12 +7,10 @@ import os.path
import random
import shutil
import tempfile
+import unittest
-from docker import auth
-from docker.auth.auth import parse_auth
-from docker import errors
-
-from .. import base
+from docker import auth, errors
+import pytest
try:
from unittest import mock
@@ -20,7 +18,7 @@ except ImportError:
import mock
-class RegressionTest(base.BaseTestCase):
+class RegressionTest(unittest.TestCase):
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
@@ -31,84 +29,70 @@ class RegressionTest(base.BaseTestCase):
assert b'_' in encoded
-class ResolveRepositoryNameTest(base.BaseTestCase):
+class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('image'),
- ('docker.io', 'image'),
+ assert auth.resolve_repository_name('image') == (
+ 'docker.io', 'image'
)
def test_resolve_repository_name_dotted_hub_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('image.valid'),
- ('docker.io', 'image.valid')
+ assert auth.resolve_repository_name('image.valid') == (
+ 'docker.io', 'image.valid'
)
def test_resolve_repository_name_hub_image(self):
- self.assertEqual(
- auth.resolve_repository_name('username/image'),
- ('docker.io', 'username/image'),
+ assert auth.resolve_repository_name('username/image') == (
+ 'docker.io', 'username/image'
)
def test_explicit_hub_index_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('docker.io/image'),
- ('docker.io', 'image')
+ assert auth.resolve_repository_name('docker.io/image') == (
+ 'docker.io', 'image'
)
def test_explicit_legacy_hub_index_library_image(self):
- self.assertEqual(
- auth.resolve_repository_name('index.docker.io/image'),
- ('docker.io', 'image')
+ assert auth.resolve_repository_name('index.docker.io/image') == (
+ 'docker.io', 'image'
)
def test_resolve_repository_name_private_registry(self):
- self.assertEqual(
- auth.resolve_repository_name('my.registry.net/image'),
- ('my.registry.net', 'image'),
+ assert auth.resolve_repository_name('my.registry.net/image') == (
+ 'my.registry.net', 'image'
)
def test_resolve_repository_name_private_registry_with_port(self):
- self.assertEqual(
- auth.resolve_repository_name('my.registry.net:5000/image'),
- ('my.registry.net:5000', 'image'),
+ assert auth.resolve_repository_name('my.registry.net:5000/image') == (
+ 'my.registry.net:5000', 'image'
)
def test_resolve_repository_name_private_registry_with_username(self):
- self.assertEqual(
- auth.resolve_repository_name('my.registry.net/username/image'),
- ('my.registry.net', 'username/image'),
- )
+ assert auth.resolve_repository_name(
+ 'my.registry.net/username/image'
+ ) == ('my.registry.net', 'username/image')
def test_resolve_repository_name_no_dots_but_port(self):
- self.assertEqual(
- auth.resolve_repository_name('hostname:5000/image'),
- ('hostname:5000', 'image'),
+ assert auth.resolve_repository_name('hostname:5000/image') == (
+ 'hostname:5000', 'image'
)
def test_resolve_repository_name_no_dots_but_port_and_username(self):
- self.assertEqual(
- auth.resolve_repository_name('hostname:5000/username/image'),
- ('hostname:5000', 'username/image'),
- )
+ assert auth.resolve_repository_name(
+ 'hostname:5000/username/image'
+ ) == ('hostname:5000', 'username/image')
def test_resolve_repository_name_localhost(self):
- self.assertEqual(
- auth.resolve_repository_name('localhost/image'),
- ('localhost', 'image'),
+ assert auth.resolve_repository_name('localhost/image') == (
+ 'localhost', 'image'
)
def test_resolve_repository_name_localhost_with_username(self):
- self.assertEqual(
- auth.resolve_repository_name('localhost/username/image'),
- ('localhost', 'username/image'),
+ assert auth.resolve_repository_name('localhost/username/image') == (
+ 'localhost', 'username/image'
)
def test_invalid_index_name(self):
- self.assertRaises(
- errors.InvalidRepository,
- lambda: auth.resolve_repository_name('-gecko.com/image')
- )
+ with pytest.raises(errors.InvalidRepository):
+ auth.resolve_repository_name('-gecko.com/image')
def encode_auth(auth_info):
@@ -117,167 +101,186 @@ def encode_auth(auth_info):
auth_info.get('password', '').encode('utf-8'))
-class ResolveAuthTest(base.BaseTestCase):
+class ResolveAuthTest(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
- auth_config = parse_auth({
- 'https://index.docker.io/v1/': index_config,
- 'my.registry.net': private_config,
- 'http://legacy.registry.url/v1/': legacy_config,
- })
+ auth_config = {
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': index_config,
+ 'my.registry.net': private_config,
+ 'http://legacy.registry.url/v1/': legacy_config,
+ })
+ }
def test_resolve_authconfig_hostname_only(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'my.registry.net'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_protocol(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'my.registry.net/v1/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net/v1/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://my.registry.net'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path_trailing_slash(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://my.registry.net/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path_wrong_secure_proto(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'https://my.registry.net'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://index.docker.io'
- )['username'],
- 'indexuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://index.docker.io'
+ )['username'] == 'indexuser'
def test_resolve_authconfig_path_wrong_proto(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'https://my.registry.net/v1/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net/v1/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_default_registry(self):
- self.assertEqual(
- auth.resolve_authconfig(self.auth_config)['username'],
- 'indexuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config
+ )['username'] == 'indexuser'
def test_resolve_authconfig_default_explicit_none(self):
- self.assertEqual(
- auth.resolve_authconfig(self.auth_config, None)['username'],
- 'indexuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, None
+ )['username'] == 'indexuser'
def test_resolve_authconfig_fully_explicit(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'http://my.registry.net/v1/'
- )['username'],
- 'privateuser'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/v1/'
+ )['username'] == 'privateuser'
def test_resolve_authconfig_legacy_config(self):
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, 'legacy.registry.url'
- )['username'],
- 'legacyauth'
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'legacy.registry.url'
+ )['username'] == 'legacyauth'
def test_resolve_authconfig_no_match(self):
- self.assertTrue(
- auth.resolve_authconfig(self.auth_config, 'does.not.exist') is None
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, 'does.not.exist'
+ ) is None
def test_resolve_registry_and_auth_library_image(self):
image = 'image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_hub_image(self):
image = 'username/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_explicit_hub(self):
image = 'docker.io/username/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_explicit_legacy_hub(self):
image = 'index.docker.io/username/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'indexuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
def test_resolve_registry_and_auth_private_registry(self):
image = 'my.registry.net/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- )['username'],
- 'privateuser',
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'privateuser'
def test_resolve_registry_and_auth_unauthenticated_registry(self):
image = 'other.registry.net/image'
- self.assertEqual(
- auth.resolve_authconfig(
- self.auth_config, auth.resolve_repository_name(image)[0]
- ),
- None,
- )
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ ) is None
+
+ def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
+ auth_config = {
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': self.index_config,
+ }),
+ 'credsStore': 'blackbox'
+ }
+ with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+ m.return_value = None
+ assert 'indexuser' == auth.resolve_authconfig(
+ auth_config, None
+ )['username']
+
+
+class CredStoreTest(unittest.TestCase):
+ def test_get_credential_store(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ }
+
+ assert auth.get_credential_store(
+ auth_config, 'registry1.io'
+ ) == 'truesecret'
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) == 'blackbox'
+
+ def test_get_credential_store_no_default(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ }
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) is None
+
+ def test_get_credential_store_default_index(self):
+ auth_config = {
+ 'credHelpers': {
+ 'https://index.docker.io/v1/': 'powerlock'
+ },
+ 'credsStore': 'truesecret'
+ }
+ assert auth.get_credential_store(auth_config, None) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'docker.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'images.io'
+ ) == 'truesecret'
-class LoadConfigTest(base.Cleanup, base.BaseTestCase):
+
+class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = auth.load_config(folder)
- self.assertTrue(cfg is not None)
+ assert cfg is not None
def test_load_config(self):
folder = tempfile.mkdtemp()
@@ -289,12 +292,12 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
f.write('email = sakuya@scarlet.net')
cfg = auth.load_config(dockercfg_path)
assert auth.INDEX_NAME in cfg
- self.assertNotEqual(cfg[auth.INDEX_NAME], None)
+ assert cfg[auth.INDEX_NAME] is not None
cfg = cfg[auth.INDEX_NAME]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
@@ -317,12 +320,12 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
+ assert cfg[registry] is not None
cfg = cfg[registry]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_custom_config_env(self):
folder = tempfile.mkdtemp()
@@ -344,12 +347,12 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
+ assert cfg[registry] is not None
cfg = cfg[registry]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_custom_config_env_with_auths(self):
folder = tempfile.mkdtemp()
@@ -372,13 +375,12 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
- cfg = cfg[registry]
- self.assertEqual(cfg['username'], 'sakuya')
- self.assertEqual(cfg['password'], 'izayoi')
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_custom_config_env_utf8(self):
folder = tempfile.mkdtemp()
@@ -402,37 +404,12 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
- assert registry in cfg
- self.assertNotEqual(cfg[registry], None)
- cfg = cfg[registry]
- self.assertEqual(cfg['username'], b'sakuya\xc3\xa6'.decode('utf8'))
- self.assertEqual(cfg['password'], b'izayoi\xc3\xa6'.decode('utf8'))
- self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
- self.assertEqual(cfg.get('auth'), None)
-
- def test_load_config_custom_config_env_with_headers(self):
- folder = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, folder)
-
- dockercfg_path = os.path.join(folder, 'config.json')
- config = {
- 'HttpHeaders': {
- 'Name': 'Spike',
- 'Surname': 'Spiegel'
- },
- }
-
- with open(dockercfg_path, 'w') as f:
- json.dump(config, f)
-
- with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
- cfg = auth.load_config(None)
- assert 'HttpHeaders' in cfg
- self.assertNotEqual(cfg['HttpHeaders'], None)
- cfg = cfg['HttpHeaders']
-
- self.assertEqual(cfg['Name'], 'Spike')
- self.assertEqual(cfg['Surname'], 'Spiegel')
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
+ assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
def test_load_config_unknown_keys(self):
folder = tempfile.mkdtemp()
@@ -460,7 +437,7 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert cfg == {'scarlet.net': {}}
+ assert cfg == {'auths': {'scarlet.net': {}}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
@@ -481,7 +458,7 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
- assert registry in cfg
- cfg = cfg[registry]
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
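Several assertions above changed because load_config now nests registry entries under an 'auths' key rather than returning a flat registry map. A tiny sketch of normalizing the two shapes; normalize_config is illustrative, and docker-py's real loader also handles keys like credsStore and credHelpers:

import json


def normalize_config(raw):
    # Legacy files map registries at the top level; newer files nest them
    # under "auths".
    if 'auths' in raw:
        return raw
    return {'auths': raw}


legacy = json.loads('{"scarlet.net": {}}')
modern = json.loads('{"auths": {"scarlet.net": {}}}')
assert normalize_config(legacy) == normalize_config(modern)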
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index 6ceb8cb..cce99c5 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -1,14 +1,82 @@
+import datetime
+import docker
+from docker.utils import kwargs_from_env
+from docker.constants import (
+ DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
+)
import os
-from docker.client import Client
-from .. import base
+import unittest
-TEST_CERT_DIR = os.path.join(
- os.path.dirname(__file__),
- 'testdata/certs',
-)
+from . import fake_api
+import pytest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
+
+
+class ClientTest(unittest.TestCase):
+
+ @mock.patch('docker.api.APIClient.events')
+ def test_events(self, mock_func):
+ since = datetime.datetime(2016, 1, 1, 0, 0)
+ mock_func.return_value = fake_api.get_fake_events()[1]
+ client = docker.from_env()
+ assert client.events(since=since) == mock_func.return_value
+ mock_func.assert_called_with(since=since)
+
+ @mock.patch('docker.api.APIClient.info')
+ def test_info(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_info()[1]
+ client = docker.from_env()
+ assert client.info() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.ping')
+ def test_ping(self, mock_func):
+ mock_func.return_value = True
+ client = docker.from_env()
+ assert client.ping() is True
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.version')
+ def test_version(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_version()[1]
+ client = docker.from_env()
+ assert client.version() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ def test_call_api_client_method(self):
+ client = docker.from_env()
+ with pytest.raises(AttributeError) as cm:
+ client.create_container()
+ s = cm.exconly()
+ assert "'DockerClient' object has no attribute 'create_container'" in s
+ assert "this method is now on the object APIClient" in s
+
+ with pytest.raises(AttributeError) as cm:
+ client.abcdef()
+ s = cm.exconly()
+ assert "'DockerClient' object has no attribute 'abcdef'" in s
+ assert "this method is now on the object APIClient" not in s
+
+ def test_call_containers(self):
+ client = docker.DockerClient(**kwargs_from_env())
+ with pytest.raises(TypeError) as cm:
+ client.containers()
+
+ s = cm.exconly()
+ assert "'ContainerCollection' object is not callable" in s
+ assert "docker.APIClient" in s
+
+
+class FromEnvTest(unittest.TestCase):
-class ClientTest(base.BaseTestCase):
def setUp(self):
self.os_environ = os.environ.copy()
@@ -22,57 +90,23 @@ class ClientTest(base.BaseTestCase):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = Client.from_env()
- self.assertEqual(client.base_url, "https://192.168.59.103:2376")
+ client = docker.from_env()
+ assert client.api.base_url == "https://192.168.59.103:2376"
def test_from_env_with_version(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = Client.from_env(version='2.32')
- self.assertEqual(client.base_url, "https://192.168.59.103:2376")
- self.assertEqual(client._version, '2.32')
-
-
-class DisableSocketTest(base.BaseTestCase):
- class DummySocket(object):
- def __init__(self, timeout=60):
- self.timeout = timeout
-
- def settimeout(self, timeout):
- self.timeout = timeout
-
- def gettimeout(self):
- return self.timeout
-
- def setUp(self):
- self.client = Client()
-
- def test_disable_socket_timeout(self):
- """Test that the timeout is disabled on a generic socket object."""
- socket = self.DummySocket()
-
- self.client._disable_socket_timeout(socket)
-
- self.assertEqual(socket.timeout, None)
-
- def test_disable_socket_timeout2(self):
- """Test that the timeouts are disabled on a generic socket object
- and it's _sock object if present."""
- socket = self.DummySocket()
- socket._sock = self.DummySocket()
-
- self.client._disable_socket_timeout(socket)
+ client = docker.from_env(version='2.32')
+ assert client.api.base_url == "https://192.168.59.103:2376"
+ assert client.api._version == '2.32'
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, None)
+ def test_from_env_without_version_uses_default(self):
+ client = docker.from_env()
- def test_disable_socket_timout_non_blocking(self):
- """Test that a non-blocking socket does not get set to blocking."""
- socket = self.DummySocket()
- socket._sock = self.DummySocket(0.0)
+ assert client.api._version == DEFAULT_DOCKER_API_VERSION
- self.client._disable_socket_timeout(socket)
+ def test_from_env_without_timeout_uses_default(self):
+ client = docker.from_env()
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, 0.0)
+ assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS
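client_test.py now targets the high-level DockerClient facade, which owns an APIClient at .api and points callers of removed low-level methods at it. A minimal sketch of that redirection, matching the substrings the tests above assert; the Mini* classes are stand-ins, and the real logic lives in docker/client.py:

class MiniAPIClient(object):
    def create_container(self, *args, **kwargs):
        return {'Id': 'abc123'}


class MiniDockerClient(object):
    def __init__(self):
        self.api = MiniAPIClient()

    def __getattr__(self, name):
        # Only reached for attributes not found normally; redirect users
        # of the old flat Client API toward APIClient.
        if hasattr(self.api, name):
            raise AttributeError(
                "'DockerClient' object has no attribute '{0}' "
                "(this method is now on the object APIClient)".format(name))
        raise AttributeError(name)


client = MiniDockerClient()
try:
    client.create_container()
    raise SystemExit('expected AttributeError')
except AttributeError as e:
    assert 'this method is now on the object APIClient' in str(e)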
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
new file mode 100644
index 0000000..2be0578
--- /dev/null
+++ b/tests/unit/dockertypes_test.py
@@ -0,0 +1,470 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+
+import pytest
+
+from docker.constants import DEFAULT_DOCKER_API_VERSION
+from docker.errors import InvalidArgument, InvalidVersion
+from docker.types import (
+ ContainerSpec, EndpointConfig, HostConfig, IPAMConfig,
+ IPAMPool, LogConfig, Mount, ServiceMode, Ulimit,
+)
+from docker.types.services import convert_service_ports
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+def create_host_config(*args, **kwargs):
+ return HostConfig(*args, **kwargs)
+
+
+class HostConfigTest(unittest.TestCase):
+ def test_create_host_config_no_options_newer_api_version(self):
+ config = create_host_config(version='1.21')
+ assert config['NetworkMode'] == 'default'
+
+ def test_create_host_config_invalid_cpu_cfs_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_quota='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_period='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_quota=23.11)
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_period=1999.0)
+
+ def test_create_host_config_with_cpu_quota(self):
+ config = create_host_config(version='1.21', cpu_quota=1999)
+ assert config.get('CpuQuota') == 1999
+
+ def test_create_host_config_with_cpu_period(self):
+ config = create_host_config(version='1.21', cpu_period=1999)
+ assert config.get('CpuPeriod') == 1999
+
+ def test_create_host_config_with_blkio_constraints(self):
+ blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
+ config = create_host_config(
+ version='1.22', blkio_weight=1999, blkio_weight_device=blkio_rate,
+ device_read_bps=blkio_rate, device_write_bps=blkio_rate,
+ device_read_iops=blkio_rate, device_write_iops=blkio_rate
+ )
+
+ assert config.get('BlkioWeight') == 1999
+ assert config.get('BlkioWeightDevice') is blkio_rate
+ assert config.get('BlkioDeviceReadBps') is blkio_rate
+ assert config.get('BlkioDeviceWriteBps') is blkio_rate
+ assert config.get('BlkioDeviceReadIOps') is blkio_rate
+ assert config.get('BlkioDeviceWriteIOps') is blkio_rate
+ assert blkio_rate[0]['Path'] == "/dev/sda"
+ assert blkio_rate[0]['Rate'] == 1000
+
+ def test_create_host_config_with_shm_size(self):
+ config = create_host_config(version='1.22', shm_size=67108864)
+ assert config.get('ShmSize') == 67108864
+
+ def test_create_host_config_with_shm_size_in_mb(self):
+ config = create_host_config(version='1.22', shm_size='64M')
+ assert config.get('ShmSize') == 67108864
+
+ def test_create_host_config_with_oom_kill_disable(self):
+ config = create_host_config(version='1.21', oom_kill_disable=True)
+ assert config.get('OomKillDisable') is True
+
+ def test_create_host_config_with_userns_mode(self):
+ config = create_host_config(version='1.23', userns_mode='host')
+ assert config.get('UsernsMode') == 'host'
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', userns_mode='host')
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', userns_mode='host12')
+
+ def test_create_host_config_with_oom_score_adj(self):
+ config = create_host_config(version='1.22', oom_score_adj=100)
+ assert config.get('OomScoreAdj') == 100
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.21', oom_score_adj=100)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.22', oom_score_adj='100')
+
+ def test_create_host_config_with_dns_opt(self):
+ tested_opts = ['use-vc', 'no-tld-query']
+ config = create_host_config(version='1.21', dns_opt=tested_opts)
+ dns_opts = config.get('DnsOptions')
+
+ assert 'use-vc' in dns_opts
+ assert 'no-tld-query' in dns_opts
+
+ def test_create_host_config_with_mem_reservation(self):
+ config = create_host_config(version='1.21', mem_reservation=67108864)
+ assert config.get('MemoryReservation') == 67108864
+
+ def test_create_host_config_with_kernel_memory(self):
+ config = create_host_config(version='1.21', kernel_memory=67108864)
+ assert config.get('KernelMemory') == 67108864
+
+ def test_create_host_config_with_pids_limit(self):
+ config = create_host_config(version='1.23', pids_limit=1024)
+ assert config.get('PidsLimit') == 1024
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', pids_limit=1024)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.23', pids_limit='1024')
+
+ def test_create_host_config_with_isolation(self):
+ config = create_host_config(version='1.24', isolation='hyperv')
+ assert config.get('Isolation') == 'hyperv'
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.23', isolation='hyperv')
+ with pytest.raises(TypeError):
+ create_host_config(
+ version='1.24', isolation={'isolation': 'hyperv'}
+ )
+
+ def test_create_host_config_pid_mode(self):
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', pid_mode='baccab125')
+
+ config = create_host_config(version='1.23', pid_mode='host')
+ assert config.get('PidMode') == 'host'
+ config = create_host_config(version='1.24', pid_mode='baccab125')
+ assert config.get('PidMode') == 'baccab125'
+
+ def test_create_host_config_invalid_mem_swappiness(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.24', mem_swappiness='40')
+
+ def test_create_host_config_with_volume_driver(self):
+ config = create_host_config(version='1.21', volume_driver='local')
+ assert config.get('VolumeDriver') == 'local'
+
+ def test_create_host_config_invalid_cpu_count_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_count='1')
+
+ def test_create_host_config_with_cpu_count(self):
+ config = create_host_config(version='1.25', cpu_count=2)
+ assert config.get('CpuCount') == 2
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_count=1)
+
+ def test_create_host_config_invalid_cpu_percent_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_percent='1')
+
+ def test_create_host_config_with_cpu_percent(self):
+ config = create_host_config(version='1.25', cpu_percent=15)
+ assert config.get('CpuPercent') == 15
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_percent=10)
+
+ def test_create_host_config_invalid_nano_cpus_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', nano_cpus='0')
+
+ def test_create_host_config_with_nano_cpus(self):
+ config = create_host_config(version='1.25', nano_cpus=1000)
+ assert config.get('NanoCpus') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', nano_cpus=1)
+
+ def test_create_host_config_with_cpu_rt_period_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_rt_period='1000')
+
+ def test_create_host_config_with_cpu_rt_period(self):
+ config = create_host_config(version='1.25', cpu_rt_period=1000)
+ assert config.get('CPURealtimePeriod') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_rt_period=1000)
+
+ def test_create_host_config_with_cpu_rt_runtime_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_rt_runtime='1000')
+
+ def test_create_host_config_with_cpu_rt_runtime(self):
+ config = create_host_config(version='1.25', cpu_rt_runtime=1000)
+ assert config.get('CPURealtimeRuntime') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_rt_runtime=1000)
+
+
+class ContainerSpecTest(unittest.TestCase):
+ def test_parse_mounts(self):
+ spec = ContainerSpec(
+ image='scratch', mounts=[
+ '/local:/container',
+ '/local2:/container2:ro',
+ Mount(target='/target', source='/source')
+ ]
+ )
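+ # String entries are parsed via Mount.parse_mount_string, so every
+ # element should come back as a Mount instance.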
+
+ assert 'Mounts' in spec
+ assert len(spec['Mounts']) == 3
+ for mount in spec['Mounts']:
+ assert isinstance(mount, Mount)
+
+
+class UlimitTest(unittest.TestCase):
+ def test_create_host_config_dict_ulimit(self):
+ ulimit_dct = {'name': 'nofile', 'soft': 8096}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
+ ulimit_obj = config['Ulimits'][0]
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj.name == ulimit_dct['name']
+ assert ulimit_obj.soft == ulimit_dct['soft']
+ assert ulimit_obj['Soft'] == ulimit_obj.soft
+
+ def test_create_host_config_dict_ulimit_capitals(self):
+ ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
+ ulimit_obj = config['Ulimits'][0]
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj.name == ulimit_dct['Name']
+ assert ulimit_obj.soft == ulimit_dct['Soft']
+ assert ulimit_obj.hard == ulimit_dct['Hard']
+ assert ulimit_obj['Soft'] == ulimit_obj.soft
+
+ def test_create_host_config_obj_ulimit(self):
+ ulimit_dct = Ulimit(name='nofile', soft=8096)
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
+ ulimit_obj = config['Ulimits'][0]
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj == ulimit_dct
+
+ def test_ulimit_invalid_type(self):
+ with pytest.raises(ValueError):
+ Ulimit(name=None)
+ with pytest.raises(ValueError):
+ Ulimit(name='hello', soft='123')
+ with pytest.raises(ValueError):
+ Ulimit(name='hello', hard='456')
+
+
+class LogConfigTest(unittest.TestCase):
+ def test_create_host_config_dict_logconfig(self):
+ dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=dct
+ )
+ assert 'LogConfig' in config
+ assert isinstance(config['LogConfig'], LogConfig)
+ assert dct['type'] == config['LogConfig'].type
+
+ def test_create_host_config_obj_logconfig(self):
+ obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=obj
+ )
+ assert 'LogConfig' in config
+ assert isinstance(config['LogConfig'], LogConfig)
+ assert obj == config['LogConfig']
+
+ def test_logconfig_invalid_config_type(self):
+ with pytest.raises(ValueError):
+ LogConfig(type=LogConfig.types.JSON, config='helloworld')
+
+
+class EndpointConfigTest(unittest.TestCase):
+ def test_create_endpoint_config_with_aliases(self):
+ config = EndpointConfig(version='1.22', aliases=['foo', 'bar'])
+ assert config == {'Aliases': ['foo', 'bar']}
+
+ with pytest.raises(InvalidVersion):
+ EndpointConfig(version='1.21', aliases=['foo', 'bar'])
+
+
+class IPAMConfigTest(unittest.TestCase):
+ def test_create_ipam_config(self):
+ ipam_pool = IPAMPool(subnet='192.168.52.0/24',
+ gateway='192.168.52.254')
+
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool])
+ assert ipam_config == {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '192.168.52.0/24',
+ 'Gateway': '192.168.52.254',
+ 'AuxiliaryAddresses': None,
+ 'IPRange': None,
+ }]
+ }
+
+
+class ServiceModeTest(unittest.TestCase):
+ def test_replicated_simple(self):
+ mode = ServiceMode('replicated')
+ assert mode == {'replicated': {}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas is None
+
+ def test_global_simple(self):
+ mode = ServiceMode('global')
+ assert mode == {'global': {}}
+ assert mode.mode == 'global'
+ assert mode.replicas is None
+
+ def test_global_replicas_error(self):
+ with pytest.raises(InvalidArgument):
+ ServiceMode('global', 21)
+
+ def test_replicated_replicas(self):
+ mode = ServiceMode('replicated', 21)
+ assert mode == {'replicated': {'Replicas': 21}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 21
+
+ def test_replicated_replicas_0(self):
+ mode = ServiceMode('replicated', 0)
+ assert mode == {'replicated': {'Replicas': 0}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 0
+
+ def test_invalid_mode(self):
+ with pytest.raises(InvalidArgument):
+ ServiceMode('foobar')
+
+
+class MountTest(unittest.TestCase):
+ def test_parse_mount_string_ro(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz:ro")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['ReadOnly'] is True
+
+ def test_parse_mount_string_rw(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz:rw")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_short_form(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_no_source(self):
+ mount = Mount.parse_mount_string("foo/bar")
+ assert mount['Source'] is None
+ assert mount['Target'] == "foo/bar"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_invalid(self):
+ with pytest.raises(InvalidArgument):
+ Mount.parse_mount_string("foo:bar:baz:rw")
+
+ def test_parse_mount_named_volume(self):
+ mount = Mount.parse_mount_string("foobar:/baz")
+ assert mount['Source'] == 'foobar'
+ assert mount['Target'] == '/baz'
+ assert mount['Type'] == 'volume'
+
+ def test_parse_mount_bind(self):
+ mount = Mount.parse_mount_string('/foo/bar:/baz')
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['Type'] == 'bind'
+
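+ # xfail: drive-letter sources such as 'C:/foo/bar' trip up the
+ # ':'-based splitting, even with IS_WINDOWS_PLATFORM patched.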
+ @pytest.mark.xfail
+ def test_parse_mount_bind_windows(self):
+ with mock.patch('docker.types.services.IS_WINDOWS_PLATFORM', True):
+ mount = Mount.parse_mount_string('C:/foo/bar:/baz')
+ assert mount['Source'] == "C:/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['Type'] == 'bind'
+
+
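+# convert_service_ports normalises {published: target} or
+# {published: (target, protocol, mode)} mappings into the API's
+# list-of-dicts port specification.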
+class ServicePortsTest(unittest.TestCase):
+ def test_convert_service_ports_simple(self):
+ ports = {8080: 80}
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ }]
+
+ def test_convert_service_ports_with_protocol(self):
+ ports = {8080: (80, 'udp')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'udp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ }]
+
+ def test_convert_service_ports_with_protocol_and_mode(self):
+ ports = {8080: (80, 'udp', 'ingress')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'udp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'ingress',
+ }]
+
+ def test_convert_service_ports_invalid(self):
+ ports = {8080: ('way', 'too', 'many', 'items', 'here')}
+
+ with pytest.raises(ValueError):
+ convert_service_ports(ports)
+
+ def test_convert_service_ports_no_protocol_and_mode(self):
+ ports = {8080: (80, None, 'host')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'host',
+ }]
+
+ def test_convert_service_ports_multiple(self):
+ ports = {
+ 8080: (80, None, 'host'),
+ 9999: 99,
+ 2375: (2375,)
+ }
+
+ converted_ports = convert_service_ports(ports)
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'host',
+ } in converted_ports
+
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 9999,
+ 'TargetPort': 99,
+ } in converted_ports
+
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 2375,
+ 'TargetPort': 2375,
+ } in converted_ports
+
+ assert len(converted_ports) == 3
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
new file mode 100644
index 0000000..e27a9b1
--- /dev/null
+++ b/tests/unit/errors_test.py
@@ -0,0 +1,133 @@
+import unittest
+
+import requests
+
+from docker.errors import (APIError, ContainerError, DockerException,
+ create_unexpected_kwargs_error,
+ create_api_error_from_http_exception)
+from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID
+from .fake_api_client import make_fake_client
+
+
+class APIErrorTest(unittest.TestCase):
+ def test_api_error_is_caught_by_dockerexception(self):
+ try:
+ raise APIError("this should be caught by DockerException")
+ except DockerException:
+ pass
+
+ def test_status_code_200(self):
+ """The status_code property is present with 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.status_code == 200
+
+ def test_status_code_400(self):
+ """The status_code property is present with 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.status_code == 400
+
+ def test_status_code_500(self):
+ """The status_code property is present with 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.status_code == 500
+
+ def test_is_server_error_200(self):
+ """Report not server error on 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_300(self):
+ """Report not server error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_400(self):
+ """Report not server error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_500(self):
+ """Report server error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_server_error() is True
+
+ def test_is_client_error_500(self):
+ """Report not client error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_client_error() is False
+
+ def test_is_client_error_400(self):
+ """Report client error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_client_error() is True
+
+ def test_create_error_from_exception(self):
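+ # raise_for_status() turns the 500 into an HTTPError, which
+ # create_api_error_from_http_exception re-raises as an APIError
+ # carrying the original response.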
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('')
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ try:
+ create_api_error_from_http_exception(e)
+ except APIError as e:
+ err = e
+ assert err.is_server_error() is True
+
+
+class ContainerErrorTest(unittest.TestCase):
+ def test_container_without_stderr(self):
+ """The massage does not contain stderr"""
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ command = "echo Hello World"
+ exit_status = 42
+ image = FAKE_IMAGE_ID
+ stderr = None
+
+ err = ContainerError(container, exit_status, command, image, stderr)
+ msg = ("Command '{}' in image '{}' returned non-zero exit status {}"
+ ).format(command, image, exit_status, stderr)
+ assert str(err) == msg
+
+ def test_container_with_stderr(self):
+ """The massage contains stderr"""
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ command = "echo Hello World"
+ exit_status = 42
+ image = FAKE_IMAGE_ID
+ stderr = "Something went wrong"
+
+ err = ContainerError(container, exit_status, command, image, stderr)
+ msg = ("Command '{}' in image '{}' returned non-zero exit status {}: "
+ "{}").format(command, image, exit_status, stderr)
+ assert str(err) == msg
+
+
+class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
+ def test_create_unexpected_kwargs_error_single(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
+ assert str(e) == "f() got an unexpected keyword argument 'foo'"
+
+ def test_create_unexpected_kwargs_error_multiple(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
+ assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
diff --git a/tests/unit/exec_test.py b/tests/unit/exec_test.py
deleted file mode 100644
index 6ba2a3d..0000000
--- a/tests/unit/exec_test.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import json
-
-from . import fake_api
-from .api_test import (
- DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
-)
-
-
-class ExecTest(DockerClientTest):
- def test_exec_create(self):
- self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
-
- args = fake_request.call_args
- self.assertEqual(
- 'POST',
- args[0][0], url_prefix + 'containers/{0}/exec'.format(
- fake_api.FAKE_CONTAINER_ID
- )
- )
-
- self.assertEqual(
- json.loads(args[1]['data']), {
- 'Tty': False,
- 'AttachStdout': True,
- 'Container': fake_api.FAKE_CONTAINER_ID,
- 'Cmd': ['ls', '-1'],
- 'Privileged': False,
- 'AttachStdin': False,
- 'AttachStderr': True,
- 'User': ''
- }
- )
-
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
-
- def test_exec_start(self):
- self.client.exec_start(fake_api.FAKE_EXEC_ID)
-
- args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'exec/{0}/start'.format(
- fake_api.FAKE_EXEC_ID
- )
- )
-
- self.assertEqual(
- json.loads(args[1]['data']), {
- 'Tty': False,
- 'Detach': False,
- }
- )
-
- self.assertEqual(
- args[1]['headers'], {
- 'Content-Type': 'application/json',
- 'Connection': 'Upgrade',
- 'Upgrade': 'tcp'
- }
- )
-
- def test_exec_start_detached(self):
- self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
-
- args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'exec/{0}/start'.format(
- fake_api.FAKE_EXEC_ID
- )
- )
-
- self.assertEqual(
- json.loads(args[1]['data']), {
- 'Tty': False,
- 'Detach': True
- }
- )
-
- self.assertEqual(
- args[1]['headers'], {
- 'Content-Type': 'application/json'
- }
- )
-
- def test_exec_inspect(self):
- self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
-
- args = fake_request.call_args
- self.assertEqual(
- args[0][1], url_prefix + 'exec/{0}/json'.format(
- fake_api.FAKE_EXEC_ID
- )
- )
-
- def test_exec_resize(self):
- self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
-
- fake_request.assert_called_with(
- 'POST',
- url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
- params={'h': 20, 'w': 60},
- timeout=DEFAULT_TIMEOUT_SECONDS
- )
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index 65a8c42..e609b64 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -6,6 +6,7 @@ CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
+FAKE_NETWORK_ID = '33fb6a3462b8'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
@@ -14,26 +15,42 @@ FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
+FAKE_NODE_ID = '24ifsmvkjbyhk'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
-def get_fake_raw_version():
+def get_fake_version():
status_code = 200
response = {
- "ApiVersion": "1.18",
- "GitCommit": "fake-commit",
- "GoVersion": "go1.3.3",
- "Version": "1.5.0"
+ 'ApiVersion': '1.35',
+ 'Arch': 'amd64',
+ 'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
+ 'Components': [{
+ 'Details': {
+ 'ApiVersion': '1.35',
+ 'Arch': 'amd64',
+ 'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
+ 'Experimental': 'false',
+ 'GitCommit': '03596f5',
+ 'GoVersion': 'go1.9.2',
+ 'KernelVersion': '4.4.0-112-generic',
+ 'MinAPIVersion': '1.12',
+ 'Os': 'linux'
+ },
+ 'Name': 'Engine',
+ 'Version': '18.01.0-ce'
+ }],
+ 'GitCommit': '03596f5',
+ 'GoVersion': 'go1.9.2',
+ 'KernelVersion': '4.4.0-112-generic',
+ 'MinAPIVersion': '1.12',
+ 'Os': 'linux',
+ 'Platform': {'Name': ''},
+ 'Version': '18.01.0-ce'
}
- return status_code, response
-
-def get_fake_version():
- status_code = 200
- response = {'GoVersion': '1', 'Version': '1.1.1',
- 'GitCommit': 'deadbeef+CHANGES'}
return status_code, response
@@ -45,6 +62,17 @@ def get_fake_info():
return status_code, response
+def post_fake_auth():
+ status_code = 200
+ response = {'Status': 'Login Succeeded',
+ 'IdentityToken': '9cbaf023786cd7'}
+ return status_code, response
+
+
+def get_fake_ping():
+ return 200, "OK"
+
+
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
@@ -121,16 +149,24 @@ def get_fake_inspect_container(tty=False):
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
- 'Config': {'Privileged': True, 'Tty': tty},
+ 'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
+ 'Name': 'foobar',
"State": {
+ "Status": "running",
"Running": True,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-09-25T14:01:18.869545111+02:00",
"Ghost": False
},
+ "HostConfig": {
+ "LogConfig": {
+ "Type": "json-file",
+ "Config": {}
+ },
+ },
"MacAddress": "02:42:ac:11:00:0a"
}
return status_code, response
@@ -139,11 +175,12 @@ def get_fake_inspect_container(tty=False):
def get_fake_inspect_image():
status_code = 200
response = {
- 'id': FAKE_IMAGE_ID,
- 'parent': "27cf784147099545",
- 'created': "2013-03-23T22:24:18.818426-07:00",
- 'container': FAKE_CONTAINER_ID,
- 'container_config':
+ 'Id': FAKE_IMAGE_ID,
+ 'Parent': "27cf784147099545",
+ 'Created': "2013-03-23T22:24:18.818426-07:00",
+ 'Container': FAKE_CONTAINER_ID,
+ 'Config': {'Labels': {'bar': 'foo'}},
+ 'ContainerConfig':
{
"Hostname": "",
"User": "",
@@ -183,7 +220,9 @@ def get_fake_wait():
def get_fake_logs():
status_code = 200
- response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
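+ # Multiplexed stream framing: an 8-byte header (stream type byte,
+ # three zero bytes, 4-byte big-endian payload length) precedes each
+ # payload; the two zero-length frames exercise empty stdout/stderr.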
+ response = (b'\x01\x00\x00\x00\x00\x00\x00\x00'
+ b'\x02\x00\x00\x00\x00\x00\x00\x00'
+ b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
return status_code, response
@@ -374,11 +413,13 @@ def get_fake_volume_list():
{
'Name': 'perfectcherryblossom',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
+ 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
+ 'Scope': 'local'
}, {
'Name': 'subterraneananimism',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism'
+ 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism',
+ 'Scope': 'local'
}
]
}
@@ -393,7 +434,8 @@ def get_fake_volume():
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
'Labels': {
'com.example.some-label': 'some-value'
- }
+ },
+ 'Scope': 'local'
}
return status_code, response
@@ -406,18 +448,85 @@ def post_fake_update_container():
return 200, {'Warnings': []}
+def post_fake_update_node():
+ return 200, None
+
+
+def post_fake_join_swarm():
+ return 200, None
+
+
+def get_fake_network_list():
+ return 200, [{
+ "Name": "bridge",
+ "Id": FAKE_NETWORK_ID,
+ "Scope": "local",
+ "Driver": "bridge",
+ "EnableIPv6": False,
+ "Internal": False,
+ "IPAM": {
+ "Driver": "default",
+ "Config": [
+ {
+ "Subnet": "172.17.0.0/16"
+ }
+ ]
+ },
+ "Containers": {
+ FAKE_CONTAINER_ID: {
+ "EndpointID": "ed2419a97c1d99",
+ "MacAddress": "02:42:ac:11:00:02",
+ "IPv4Address": "172.17.0.2/16",
+ "IPv6Address": ""
+ }
+ },
+ "Options": {
+ "com.docker.network.bridge.default_bridge": "true",
+ "com.docker.network.bridge.enable_icc": "true",
+ "com.docker.network.bridge.enable_ip_masquerade": "true",
+ "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+ "com.docker.network.bridge.name": "docker0",
+ "com.docker.network.driver.mtu": "1500"
+ }
+ }]
+
+
+def get_fake_network():
+ return 200, get_fake_network_list()[1][0]
+
+
+def post_fake_network():
+ return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
+
+
+def delete_fake_network():
+ return 204, None
+
+
+def post_fake_network_connect():
+ return 200, None
+
+
+def post_fake_network_disconnect():
+ return 200, None
+
+
# Maps real api url to fake response callback
-prefix = 'http+docker://localunixsocket'
+prefix = 'http+docker://localhost'
if constants.IS_WINDOWS_PLATFORM:
prefix = 'http+docker://localnpipe'
fake_responses = {
'{0}/version'.format(prefix):
- get_fake_raw_version,
+ get_fake_version,
'{1}/{0}/version'.format(CURRENT_VERSION, prefix):
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
+ '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
+ post_fake_auth,
+ '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
+ get_fake_ping,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
@@ -507,4 +616,30 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'DELETE'):
fake_remove_volume,
+ ('{1}/{0}/nodes/{2}/update?version=1'.format(
+ CURRENT_VERSION, prefix, FAKE_NODE_ID
+ ), 'POST'):
+ post_fake_update_node,
+ ('{1}/{0}/swarm/join'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_join_swarm,
+ ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
+ get_fake_network_list,
+ ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'GET'):
+ get_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'DELETE'):
+ delete_fake_network,
+ ('{1}/{0}/networks/{2}/connect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_connect,
+ ('{1}/{0}/networks/{2}/disconnect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_disconnect,
}
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
new file mode 100644
index 0000000..15b60ea
--- /dev/null
+++ b/tests/unit/fake_api_client.py
@@ -0,0 +1,61 @@
+import copy
+import docker
+
+from . import fake_api
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class CopyReturnMagicMock(mock.MagicMock):
+ """
+ A MagicMock which deep copies every return value.
+ """
+ def _mock_call(self, *args, **kwargs):
+ ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
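+ # The canned fake_api responses are shared module-level dicts and
+ # lists; returning deep copies keeps one call's mutations from
+ # leaking into the next.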
+ if isinstance(ret, (dict, list)):
+ ret = copy.deepcopy(ret)
+ return ret
+
+
+def make_fake_api_client():
+ """
+ Returns non-complete fake APIClient.
+
+ This returns most of the default cases correctly, but most arguments that
+ change behaviour will not work.
+ """
+ api_client = docker.APIClient()
+ mock_client = CopyReturnMagicMock(**{
+ 'build.return_value': fake_api.FAKE_IMAGE_ID,
+ 'commit.return_value': fake_api.post_fake_commit()[1],
+ 'containers.return_value': fake_api.get_fake_containers()[1],
+ 'create_container.return_value':
+ fake_api.post_fake_create_container()[1],
+ 'create_host_config.side_effect': api_client.create_host_config,
+ 'create_network.return_value': fake_api.post_fake_network()[1],
+ 'exec_create.return_value': fake_api.post_fake_exec_create()[1],
+ 'exec_start.return_value': fake_api.post_fake_exec_start()[1],
+ 'images.return_value': fake_api.get_fake_images()[1],
+ 'inspect_container.return_value':
+ fake_api.get_fake_inspect_container()[1],
+ 'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
+ 'inspect_network.return_value': fake_api.get_fake_network()[1],
+ 'logs.return_value': [b'hello world\n'],
+ 'networks.return_value': fake_api.get_fake_network_list()[1],
+ 'start.return_value': None,
+ 'wait.return_value': {'StatusCode': 0},
+ })
+ mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
+ return mock_client
+
+
+def make_fake_client():
+ """
+ Returns a Client with a fake APIClient.
+ """
+ client = docker.DockerClient()
+ client.api = make_fake_api_client()
+ return client
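+
+
+# Typical use (a sketch): model-level calls resolve against the canned
+# responses while tests assert on the underlying APIClient mock:
+#
+# client = make_fake_client()
+# client.containers.get(fake_api.FAKE_CONTAINER_ID)
+# client.api.inspect_container.assert_called_with(fake_api.FAKE_CONTAINER_ID)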
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
new file mode 100644
index 0000000..2b0b499
--- /dev/null
+++ b/tests/unit/models_containers_test.py
@@ -0,0 +1,538 @@
+import docker
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
+from docker.models.containers import Container, _create_container_args
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID, FAKE_EXEC_ID
+from .fake_api_client import make_fake_client
+import pytest
+
+
+class ContainerCollectionTest(unittest.TestCase):
+ def test_run(self):
+ client = make_fake_client()
+ out = client.containers.run("alpine", "echo hello world")
+
+ assert out == b'hello world\n'
+
+ client.api.create_container.assert_called_with(
+ image="alpine",
+ command="echo hello world",
+ detach=False,
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.logs.assert_called_with(
+ FAKE_CONTAINER_ID, stderr=False, stdout=True, stream=True,
+ follow=True
+ )
+
+ def test_create_container_args(self):
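+ # _create_container_args splits the flat run()/create() kwargs into
+ # top-level create_container arguments plus a nested host_config
+ # dict, as asserted below.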
+ create_kwargs = _create_container_args(dict(
+ image='alpine',
+ command='echo hello world',
+ blkio_weight_device=[{'Path': 'foo', 'Weight': 3}],
+ blkio_weight=2,
+ cap_add=['foo'],
+ cap_drop=['bar'],
+ cgroup_parent='foobar',
+ cpu_period=1,
+ cpu_quota=2,
+ cpu_shares=5,
+ cpuset_cpus='0-3',
+ detach=False,
+ device_read_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_read_iops=[{'Path': 'foo', 'Rate': 3}],
+ device_write_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_write_iops=[{'Path': 'foo', 'Rate': 3}],
+ devices=['/dev/sda:/dev/xvda:rwm'],
+ dns=['8.8.8.8'],
+ domainname='example.com',
+ dns_opt=['foo'],
+ dns_search=['example.com'],
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ extra_hosts={'foo': '1.2.3.4'},
+ group_add=['blah'],
+ ipc_mode='foo',
+ kernel_memory=123,
+ labels={'key': 'value'},
+ links={'foo': 'bar'},
+ log_config={'Type': 'json-file', 'Config': {}},
+ lxc_conf={'foo': 'bar'},
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ mac_address='abc123',
+ mem_limit=123,
+ mem_reservation=123,
+ mem_swappiness=2,
+ memswap_limit=456,
+ name='somename',
+ network_disabled=False,
+ network='foo',
+ oom_kill_disable=True,
+ oom_score_adj=5,
+ pid_mode='host',
+ pids_limit=500,
+ ports={
+ 1111: 4567,
+ 2222: None
+ },
+ privileged=True,
+ publish_all_ports=True,
+ read_only=True,
+ restart_policy={'Name': 'always'},
+ security_opt=['blah'],
+ shm_size=123,
+ stdin_open=True,
+ stop_signal=9,
+ sysctls={'foo': 'bar'},
+ tmpfs={'/blah': ''},
+ tty=True,
+ ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ user='bob',
+ userns_mode='host',
+ version='1.23',
+ volume_driver='some_driver',
+ volumes=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro',
+ 'C:\\windows\\path:D:\\hello\\world:rw'
+ ],
+ volumes_from=['container'],
+ working_dir='/code'
+ ))
+
+ expected = dict(
+ image='alpine',
+ command='echo hello world',
+ domainname='example.com',
+ detach=False,
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ host_config={
+ 'Binds': [
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro',
+ 'C:\\windows\\path:D:\\hello\\world:rw'
+ ],
+ 'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioWeightDevice': [{'Path': 'foo', 'Weight': 3}],
+ 'BlkioWeight': 2,
+ 'CapAdd': ['foo'],
+ 'CapDrop': ['bar'],
+ 'CgroupParent': 'foobar',
+ 'CpuPeriod': 1,
+ 'CpuQuota': 2,
+ 'CpuShares': 5,
+ 'CpusetCpus': '0-3',
+ 'Devices': [{'PathOnHost': '/dev/sda',
+ 'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/xvda'}],
+ 'Dns': ['8.8.8.8'],
+ 'DnsOptions': ['foo'],
+ 'DnsSearch': ['example.com'],
+ 'ExtraHosts': ['foo:1.2.3.4'],
+ 'GroupAdd': ['blah'],
+ 'IpcMode': 'foo',
+ 'KernelMemory': 123,
+ 'Links': ['foo:bar'],
+ 'LogConfig': {'Type': 'json-file', 'Config': {}},
+ 'LxcConf': [{'Key': 'foo', 'Value': 'bar'}],
+ 'Memory': 123,
+ 'MemoryReservation': 123,
+ 'MemorySwap': 456,
+ 'MemorySwappiness': 2,
+ 'NetworkMode': 'foo',
+ 'OomKillDisable': True,
+ 'OomScoreAdj': 5,
+ 'PidMode': 'host',
+ 'PidsLimit': 500,
+ 'PortBindings': {
+ '1111/tcp': [{'HostIp': '', 'HostPort': '4567'}],
+ '2222/tcp': [{'HostIp': '', 'HostPort': ''}]
+ },
+ 'Privileged': True,
+ 'PublishAllPorts': True,
+ 'ReadonlyRootfs': True,
+ 'RestartPolicy': {'Name': 'always'},
+ 'SecurityOpt': ['blah'],
+ 'ShmSize': 123,
+ 'Sysctls': {'foo': 'bar'},
+ 'Tmpfs': {'/blah': ''},
+ 'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ 'UsernsMode': 'host',
+ 'VolumesFrom': ['container'],
+ },
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ labels={'key': 'value'},
+ mac_address='abc123',
+ name='somename',
+ network_disabled=False,
+ networking_config={'foo': None},
+ ports=[('1111', 'tcp'), ('2222', 'tcp')],
+ stdin_open=True,
+ stop_signal=9,
+ tty=True,
+ user='bob',
+ volume_driver='some_driver',
+ volumes=[
+ '/mnt/vol2',
+ '/mnt/vol1',
+ '/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath',
+ 'D:\\hello\\world'
+ ],
+ working_dir='/code'
+ )
+
+ assert create_kwargs == expected
+
+ def test_run_detach(self):
+ client = make_fake_client()
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='sleep 300',
+ detach=True,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_run_pull(self):
+ client = make_fake_client()
+
+ # raise exception on first call, then return normal value
+ client.api.create_container.side_effect = [
+ docker.errors.ImageNotFound(""),
+ client.api.create_container.return_value
+ ]
+
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.pull.assert_called_with('alpine', platform=None, tag=None)
+
+ def test_run_with_error(self):
+ client = make_fake_client()
+ client.api.logs.return_value = "some error"
+ client.api.wait.return_value = {'StatusCode': 1}
+
+ with pytest.raises(docker.errors.ContainerError) as cm:
+ client.containers.run('alpine', 'echo hello world')
+ assert cm.value.exit_status == 1
+ assert "some error" in cm.exconly()
+
+ def test_run_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.run(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ detach=False,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+
+ def test_run_remove(self):
+ client = make_fake_client()
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.api.wait.return_value = {'StatusCode': 1}
+ with pytest.raises(docker.errors.ContainerError):
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ client.api.wait.return_value = {'StatusCode': 1}
+ with pytest.raises(docker.errors.ContainerError):
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
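+ # detach + remove relies on the AutoRemove host-config flag, which
+ # requires API version 1.25 or later; older versions must raise.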
+ client = make_fake_client()
+ client.api._version = '1.24'
+ with pytest.raises(RuntimeError):
+ client.containers.run("alpine", detach=True, remove=True)
+
+ client = make_fake_client()
+ client.api._version = '1.23'
+ with pytest.raises(RuntimeError):
+ client.containers.run("alpine", detach=True, remove=True)
+
+ client = make_fake_client()
+ client.api._version = '1.25'
+ client.containers.run("alpine", detach=True, remove=True)
+ client.api.remove_container.assert_not_called()
+ client.api.create_container.assert_called_with(
+ command=None,
+ image='alpine',
+ detach=True,
+ host_config={'AutoRemove': True,
+ 'NetworkMode': 'default'}
+ )
+
+ client = make_fake_client()
+ client.api._version = '1.26'
+ client.containers.run("alpine", detach=True, remove=True)
+ client.api.remove_container.assert_not_called()
+ client.api.create_container.assert_called_with(
+ command=None,
+ image='alpine',
+ detach=True,
+ host_config={'AutoRemove': True,
+ 'NetworkMode': 'default'}
+ )
+
+ def test_create(self):
+ client = make_fake_client()
+ container = client.containers.create(
+ 'alpine',
+ 'echo hello world',
+ environment={'FOO': 'BAR'}
+ )
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='echo hello world',
+ environment={'FOO': 'BAR'},
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_create_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.create(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ host_config={'NetworkMode': 'default'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ containers = client.containers.list(all=True)
+ client.api.containers.assert_called_with(
+ all=True,
+ before=None,
+ filters=None,
+ limit=-1,
+ since=None
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ assert len(containers) == 1
+ assert isinstance(containers[0], Container)
+ assert containers[0].id == FAKE_CONTAINER_ID
+
+
+class ContainerTest(unittest.TestCase):
+ def test_name(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.name == 'foobar'
+
+ def test_status(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.status == "running"
+
+ def test_attach(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attach(stream=True)
+ client.api.attach.assert_called_with(FAKE_CONTAINER_ID, stream=True)
+
+ def test_commit(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ image = container.commit()
+ client.api.commit.assert_called_with(FAKE_CONTAINER_ID,
+ repository=None,
+ tag=None)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_diff(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.diff()
+ client.api.diff.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_exec_run(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("echo hello world", privileged=True, stream=True)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user='', environment=None,
+ workdir=None
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+ )
+
+ def test_exec_run_failure(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("docker ps", privileged=True, stream=False)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user='', environment=None,
+ workdir=None
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False
+ )
+
+ def test_export(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.export()
+ client.api.export.assert_called_with(
+ FAKE_CONTAINER_ID, DEFAULT_DATA_CHUNK_SIZE
+ )
+
+ def test_get_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.get_archive('foo')
+ client.api.get_archive.assert_called_with(
+ FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
+ )
+
+ def test_image(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.image.id == FAKE_IMAGE_ID
+
+ def test_kill(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.kill(signal=5)
+ client.api.kill.assert_called_with(FAKE_CONTAINER_ID, signal=5)
+
+ def test_labels(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.labels == {'foo': 'bar'}
+
+ def test_logs(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.logs()
+ client.api.logs.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_pause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.pause()
+ client.api.pause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_put_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.put_archive('path', 'foo')
+ client.api.put_archive.assert_called_with(FAKE_CONTAINER_ID,
+ 'path', 'foo')
+
+ def test_remove(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.remove()
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_rename(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.rename("foo")
+ client.api.rename.assert_called_with(FAKE_CONTAINER_ID, "foo")
+
+ def test_resize(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.resize(1, 2)
+ client.api.resize.assert_called_with(FAKE_CONTAINER_ID, 1, 2)
+
+ def test_restart(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.restart()
+ client.api.restart.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_start(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.start()
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stats(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stats()
+ client.api.stats.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stop(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stop()
+ client.api.stop.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_top(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.top()
+ client.api.top.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_unpause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.unpause()
+ client.api.unpause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_update(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.update(cpu_shares=2)
+ client.api.update_container.assert_called_with(FAKE_CONTAINER_ID,
+ cpu_shares=2)
+
+ def test_wait(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.wait()
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
new file mode 100644
index 0000000..6783279
--- /dev/null
+++ b/tests/unit/models_images_test.py
@@ -0,0 +1,128 @@
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_IMAGE_ID
+from .fake_api_client import make_fake_client
+
+
+class ImageCollectionTest(unittest.TestCase):
+ def test_build(self):
+ client = make_fake_client()
+ image = client.images.build()
+ client.api.build.assert_called_with()
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_get(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_labels(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ assert image.labels == {'bar': 'foo'}
+
+ def test_list(self):
+ client = make_fake_client()
+ images = client.images.list(all=True)
+ client.api.images.assert_called_with(all=True, name=None, filters=None)
+ assert len(images) == 1
+ assert isinstance(images[0], Image)
+ assert images[0].id == FAKE_IMAGE_ID
+
+ def test_load(self):
+ client = make_fake_client()
+ client.images.load('byte stream')
+ client.api.load_image.assert_called_with('byte stream')
+
+ def test_pull(self):
+ client = make_fake_client()
+ image = client.images.pull('test_image:latest')
+ client.api.pull.assert_called_with('test_image', tag='latest')
+ client.api.inspect_image.assert_called_with('test_image:latest')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_pull_multiple(self):
+ client = make_fake_client()
+ images = client.images.pull('test_image')
+ client.api.pull.assert_called_with('test_image', tag=None)
+ client.api.images.assert_called_with(
+ all=False, name='test_image', filters=None
+ )
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert len(images) == 1
+ image = images[0]
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_push(self):
+ client = make_fake_client()
+ client.images.push('foobar', insecure_registry=True)
+ client.api.push.assert_called_with(
+ 'foobar',
+ tag=None,
+ insecure_registry=True
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ client.images.remove('test_image')
+ client.api.remove_image.assert_called_with('test_image')
+
+ def test_search(self):
+ client = make_fake_client()
+ client.images.search('test')
+ client.api.search.assert_called_with('test')
+
+
+class ImageTest(unittest.TestCase):
+ def test_short_id(self):
+ image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'sha256:b684607067'
+
+ image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'b684607067'
+
+ def test_tags(self):
+ image = Image(attrs={
+ 'RepoTags': ['test_image:latest']
+ })
+ assert image.tags == ['test_image:latest']
+
+ image = Image(attrs={
+ 'RepoTags': ['<none>:<none>']
+ })
+ assert image.tags == []
+
+ image = Image(attrs={
+ 'RepoTags': None
+ })
+ assert image.tags == []
+
+ def test_history(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.history()
+ client.api.history.assert_called_with(FAKE_IMAGE_ID)
+
+ def test_save(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.save()
+ client.api.get_image.assert_called_with(
+ FAKE_IMAGE_ID, DEFAULT_DATA_CHUNK_SIZE
+ )
+
+ def test_tag(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.tag('foo')
+ client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None)
diff --git a/tests/unit/models_networks_test.py b/tests/unit/models_networks_test.py
new file mode 100644
index 0000000..58c9fce
--- /dev/null
+++ b/tests/unit/models_networks_test.py
@@ -0,0 +1,64 @@
+import unittest
+
+from .fake_api import FAKE_NETWORK_ID, FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class NetworkCollectionTest(unittest.TestCase):
+
+ def test_create(self):
+ client = make_fake_client()
+ network = client.networks.create("foobar", labels={'foo': 'bar'})
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+ client.api.create_network.assert_called_once_with(
+ "foobar",
+ labels={'foo': 'bar'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ networks = client.networks.list()
+ assert networks[0].id == FAKE_NETWORK_ID
+ client.api.networks.assert_called_once_with()
+
+ client = make_fake_client()
+ client.networks.list(ids=["abc"])
+ client.api.networks.assert_called_once_with(ids=["abc"])
+
+ client = make_fake_client()
+ client.networks.list(names=["foobar"])
+ client.api.networks.assert_called_once_with(names=["foobar"])
+
+
+class NetworkTest(unittest.TestCase):
+
+ def test_connect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.connect(FAKE_CONTAINER_ID)
+ client.api.connect_container_to_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_disconnect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.disconnect(FAKE_CONTAINER_ID)
+ client.api.disconnect_container_from_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.remove()
+ client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID)
diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py
new file mode 100644
index 0000000..5af24ee
--- /dev/null
+++ b/tests/unit/models_resources_test.py
@@ -0,0 +1,28 @@
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class ModelTest(unittest.TestCase):
+ def test_reload(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attrs['Name'] = "oldname"
+ container.reload()
+ assert client.api.inspect_container.call_count == 2
+ assert container.attrs['Name'] == "foobar"
+
+ def test_hash(self):
+ client = make_fake_client()
+ container1 = client.containers.get(FAKE_CONTAINER_ID)
+ my_set = set([container1])
+ assert len(my_set) == 1
+
+ container2 = client.containers.get(FAKE_CONTAINER_ID)
+ my_set.add(container2)
+ assert len(my_set) == 1
+
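+ # Same raw ID, different model class: the Image hashes differently
+ # from the Container, so the set grows.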
+ image1 = client.images.get(FAKE_CONTAINER_ID)
+ my_set.add(image1)
+ assert len(my_set) == 2
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
new file mode 100644
index 0000000..247bb4a
--- /dev/null
+++ b/tests/unit/models_services_test.py
@@ -0,0 +1,53 @@
+import unittest
+from docker.models.services import _get_create_service_kwargs
+
+
+class CreateServiceKwargsTest(unittest.TestCase):
+ def test_get_create_service_kwargs(self):
+ kwargs = _get_create_service_kwargs('test', {
+ 'image': 'foo',
+ 'command': 'true',
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'hostname': 'test_host',
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'networks': ['somenet'],
+ 'endpoint_spec': {'blah': 'blah'},
+ 'container_labels': {'containerkey': 'containervalue'},
+ 'resources': {'foo': 'bar'},
+ 'restart_policy': {'restart': 'policy'},
+ 'log_driver': 'logdriver',
+ 'log_driver_options': {'foo': 'bar'},
+ 'args': ['some', 'args'],
+ 'env': {'FOO': 'bar'},
+ 'workdir': '/',
+ 'user': 'bob',
+ 'mounts': [{'some': 'mounts'}],
+ 'stop_grace_period': 5,
+ 'constraints': ['foo=bar'],
+ })
+
+ task_template = kwargs.pop('task_template')
+
+ assert kwargs == {
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'endpoint_spec': {'blah': 'blah'},
+ }
+ assert set(task_template.keys()) == set([
+ 'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
+ 'LogDriver', 'Networks'
+ ])
+ assert task_template['Placement'] == {'Constraints': ['foo=bar']}
+ assert task_template['LogDriver'] == {
+ 'Name': 'logdriver',
+ 'Options': {'foo': 'bar'}
+ }
+ assert task_template['Networks'] == [{'Target': 'somenet'}]
+ assert set(task_template['ContainerSpec'].keys()) == set([
+ 'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
+ 'Labels', 'Mounts', 'StopGracePeriod'
+ ])
diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py
deleted file mode 100644
index 2521688..0000000
--- a/tests/unit/network_test.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import json
-
-import six
-
-from .. import base
-from .api_test import DockerClientTest, url_prefix, response
-from docker.utils import create_ipam_config, create_ipam_pool
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-
-class NetworkTest(DockerClientTest):
- @base.requires_api_version('1.21')
- def test_list_networks(self):
- networks = [
- {
- "name": "none",
- "id": "8e4e55c6863ef424",
- "type": "null",
- "endpoints": []
- },
- {
- "name": "host",
- "id": "062b6d9ea7913fde",
- "type": "host",
- "endpoints": []
- },
- ]
-
- get = mock.Mock(return_value=response(
- status_code=200, content=json.dumps(networks).encode('utf-8')))
-
- with mock.patch('docker.Client.get', get):
- self.assertEqual(self.client.networks(), networks)
-
- self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
-
- filters = json.loads(get.call_args[1]['params']['filters'])
- self.assertFalse(filters)
-
- self.client.networks(names=['foo'])
- filters = json.loads(get.call_args[1]['params']['filters'])
- self.assertEqual(filters, {'name': ['foo']})
-
- self.client.networks(ids=['123'])
- filters = json.loads(get.call_args[1]['params']['filters'])
- self.assertEqual(filters, {'id': ['123']})
-
- @base.requires_api_version('1.21')
- def test_create_network(self):
- network_data = {
- "id": 'abc12345',
- "warning": "",
- }
-
- network_response = response(status_code=200, content=network_data)
- post = mock.Mock(return_value=network_response)
-
- with mock.patch('docker.Client.post', post):
- result = self.client.create_network('foo')
- self.assertEqual(result, network_data)
-
- self.assertEqual(
- post.call_args[0][0],
- url_prefix + 'networks/create')
-
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {"Name": "foo"})
-
- opts = {
- 'com.docker.network.bridge.enable_icc': False,
- 'com.docker.network.bridge.enable_ip_masquerade': False,
- }
- self.client.create_network('foo', 'bridge', opts)
-
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {"Name": "foo", "Driver": "bridge", "Options": opts})
-
- ipam_pool_config = create_ipam_pool(subnet="192.168.52.0/24",
- gateway="192.168.52.254")
- ipam_config = create_ipam_config(pool_configs=[ipam_pool_config])
-
- self.client.create_network("bar", driver="bridge",
- ipam=ipam_config)
-
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {
- "Name": "bar",
- "Driver": "bridge",
- "IPAM": {
- "Driver": "default",
- "Config": [{
- "IPRange": None,
- "Gateway": "192.168.52.254",
- "Subnet": "192.168.52.0/24",
- "AuxiliaryAddresses": None,
- }]
- }
- })
-
- @base.requires_api_version('1.21')
- def test_remove_network(self):
- network_id = 'abc12345'
- delete = mock.Mock(return_value=response(status_code=200))
-
- with mock.patch('docker.Client.delete', delete):
- self.client.remove_network(network_id)
-
- args = delete.call_args
- self.assertEqual(args[0][0],
- url_prefix + 'networks/{0}'.format(network_id))
-
- @base.requires_api_version('1.21')
- def test_inspect_network(self):
- network_id = 'abc12345'
- network_name = 'foo'
- network_data = {
- six.u('name'): network_name,
- six.u('id'): network_id,
- six.u('driver'): 'bridge',
- six.u('containers'): {},
- }
-
- network_response = response(status_code=200, content=network_data)
- get = mock.Mock(return_value=network_response)
-
- with mock.patch('docker.Client.get', get):
- result = self.client.inspect_network(network_id)
- self.assertEqual(result, network_data)
-
- args = get.call_args
- self.assertEqual(args[0][0],
- url_prefix + 'networks/{0}'.format(network_id))
-
- @base.requires_api_version('1.21')
- def test_connect_container_to_network(self):
- network_id = 'abc12345'
- container_id = 'def45678'
-
- post = mock.Mock(return_value=response(status_code=201))
-
- with mock.patch('docker.Client.post', post):
- self.client.connect_container_to_network(
- {'Id': container_id},
- network_id,
- aliases=['foo', 'bar'],
- links=[('baz', 'quux')]
- )
-
- self.assertEqual(
- post.call_args[0][0],
- url_prefix + 'networks/{0}/connect'.format(network_id))
-
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {
- 'Container': container_id,
- 'EndpointConfig': {
- 'Aliases': ['foo', 'bar'],
- 'Links': ['baz:quux'],
- },
- })
-
- @base.requires_api_version('1.21')
- def test_disconnect_container_from_network(self):
- network_id = 'abc12345'
- container_id = 'def45678'
-
- post = mock.Mock(return_value=response(status_code=201))
-
- with mock.patch('docker.Client.post', post):
- self.client.disconnect_container_from_network(
- {'Id': container_id}, network_id)
-
- self.assertEqual(
- post.call_args[0][0],
- url_prefix + 'networks/{0}/disconnect'.format(network_id))
-
- self.assertEqual(
- json.loads(post.call_args[1]['data']),
- {'Container': container_id})
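The module deleted above exercised the create_ipam_config/create_ipam_pool helpers, which no longer live in docker.utils in this release. A minimal sketch of the equivalent construction, assuming the docker-py 2.x+ API where these became docker.types.IPAMConfig and docker.types.IPAMPool (names taken from that newer API, not from the deleted test):

    from docker.types import IPAMConfig, IPAMPool

    pool = IPAMPool(subnet='192.168.52.0/24', gateway='192.168.52.254')
    ipam = IPAMConfig(pool_configs=[pool])
    # Serializes to the same payload the deleted test asserted on:
    # {'Driver': 'default',
    #  'Config': [{'Subnet': '192.168.52.0/24', 'Gateway': '192.168.52.254',
    #              'IPRange': None, 'AuxiliaryAddresses': None}]}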
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
index 2ad1cad..73b7336 100644
--- a/tests/unit/ssladapter_test.py
+++ b/tests/unit/ssladapter_test.py
@@ -1,4 +1,6 @@
-from docker.ssladapter import ssladapter
+import unittest
+from docker.transport import ssladapter
+import pytest
try:
from backports.ssl_match_hostname import (
@@ -16,19 +18,18 @@ except ImportError:
OP_NO_SSLv3 = 0x2000000
OP_NO_TLSv1 = 0x4000000
-from .. import base
-
-class SSLAdapterTest(base.BaseTestCase):
+class SSLAdapterTest(unittest.TestCase):
def test_only_uses_tls(self):
ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
assert ssl_context.options & OP_NO_SSLv3
- assert ssl_context.options & OP_NO_SSLv2
+ # if OpenSSL is compiled without SSL2 support, OP_NO_SSLv2 will be 0
+ assert not bool(OP_NO_SSLv2) or ssl_context.options & OP_NO_SSLv2
assert not ssl_context.options & OP_NO_TLSv1
-class MatchHostnameTest(base.BaseTestCase):
+class MatchHostnameTest(unittest.TestCase):
cert = {
'issuer': (
(('countryName', u'US'),),
@@ -69,11 +70,9 @@ class MatchHostnameTest(base.BaseTestCase):
assert match_hostname(self.cert, 'touhou.gensokyo.jp') is None
def test_match_ip_address_failure(self):
- self.assertRaises(
- CertificateError, match_hostname, self.cert, '192.168.0.25'
- )
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, '192.168.0.25')
def test_match_dns_failure(self):
- self.assertRaises(
- CertificateError, match_hostname, self.cert, 'foobar.co.uk'
- )
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, 'foobar.co.uk')
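The new OP_NO_SSLv2 guard above exists because, when the interpreter's OpenSSL is built without SSLv2, the constant comes out as 0 and any `options & 0` check is vacuously false. A standalone sketch of the same short-circuit, using only the stdlib ssl module:

    import ssl

    ctx = ssl.create_default_context()
    no_sslv2 = getattr(ssl, 'OP_NO_SSLv2', 0)  # 0 when OpenSSL lacks SSLv2
    # Passes either because SSLv2 support is absent entirely, or because the
    # context really does set OP_NO_SSLv2.
    assert not bool(no_sslv2) or ctx.options & no_sslv2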
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
new file mode 100644
index 0000000..4385380
--- /dev/null
+++ b/tests/unit/swarm_test.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import json
+
+from . import fake_api
+from ..helpers import requires_api_version
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
+
+
+class SwarmTest(BaseAPIClientTest):
+ @requires_api_version('1.24')
+ def test_node_update(self):
+ node_spec = {
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+
+ self.client.update_node(
+ node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec
+ )
+ args = fake_request.call_args
+ assert args[0][1] == (
+ url_prefix + 'nodes/24ifsmvkjbyhk/update?version=1'
+ )
+ assert json.loads(args[1]['data']) == node_spec
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+
+ @requires_api_version('1.24')
+ def test_join_swarm(self):
+ remote_addr = ['1.2.3.4:2377']
+ listen_addr = '2.3.4.5:2377'
+ join_token = 'A_BEAUTIFUL_JOIN_TOKEN'
+
+ data = {
+ 'RemoteAddrs': remote_addr,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token
+ }
+
+ self.client.join_swarm(
+ remote_addrs=remote_addr,
+ listen_addr=listen_addr,
+ join_token=join_token
+ )
+
+ args = fake_request.call_args
+
+ assert args[0][1] == url_prefix + 'swarm/join'
+ assert json.loads(args[1]['data']) == data
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+
+ @requires_api_version('1.24')
+ def test_join_swarm_no_listen_address_takes_default(self):
+ remote_addr = ['1.2.3.4:2377']
+ join_token = 'A_BEAUTIFUL_JOIN_TOKEN'
+
+ data = {
+ 'RemoteAddrs': remote_addr,
+ 'ListenAddr': '0.0.0.0:2377',
+ 'JoinToken': join_token
+ }
+
+ self.client.join_swarm(remote_addrs=remote_addr, join_token=join_token)
+
+ args = fake_request.call_args
+
+ assert args[0][1] == url_prefix + 'swarm/join'
+ assert json.loads(args[1]['data']) == data
+ assert args[1]['headers']['Content-Type'] == 'application/json'
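A hedged usage sketch of the API these tests pin down; the manager address and token are illustrative placeholders, and a reachable daemon is assumed:

    import docker

    client = docker.APIClient(version='1.24')
    client.join_swarm(
        remote_addrs=['1.2.3.4:2377'],
        join_token='<token from `docker swarm init`>',  # hypothetical value
    )
    # listen_addr is omitted, so it defaults to '0.0.0.0:2377', which is
    # exactly what test_join_swarm_no_listen_address_takes_default asserts.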
diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py
new file mode 100644
index 0000000..50ba383
--- /dev/null
+++ b/tests/unit/utils_config_test.py
@@ -0,0 +1,123 @@
+import os
+import unittest
+import shutil
+import tempfile
+import json
+
+from py.test import ensuretemp
+from pytest import mark
+from docker.utils import config
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class FindConfigFileTest(unittest.TestCase):
+ def tmpdir(self, name):
+ tmpdir = ensuretemp(name)
+ self.addCleanup(tmpdir.remove)
+ return tmpdir
+
+ def test_find_config_fallback(self):
+ tmpdir = self.tmpdir('test_find_config_fallback')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() is None
+
+ def test_find_config_from_explicit_path(self):
+ tmpdir = self.tmpdir('test_find_config_from_explicit_path')
+ config_path = tmpdir.ensure('my-config-file.json')
+
+ assert config.find_config_file(str(config_path)) == str(config_path)
+
+ def test_find_config_from_environment(self):
+ tmpdir = self.tmpdir('test_find_config_from_environment')
+ config_path = tmpdir.ensure('config.json')
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_posix(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_posix')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_legacy_name(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
+ config_path = tmpdir.ensure('.dockercfg')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform != 'win32'")
+ def test_find_config_from_home_windows(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_windows')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+
+class LoadConfigTest(unittest.TestCase):
+ def test_load_config_no_file(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg = config.load_general_config(folder)
+ assert cfg is not None
+ assert isinstance(cfg, dict)
+ assert not cfg
+
+ def test_load_config_custom_headers(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'HttpHeaders': {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ },
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert 'HttpHeaders' in cfg
+ assert cfg['HttpHeaders'] == {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ }
+
+ def test_load_config_detach_keys(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert cfg == config_data
+
+ def test_load_config_from_env(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = config.load_general_config(None)
+ assert cfg == config_data
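Taken together, these tests fix the lookup order of find_config_file: an explicit path first, then $DOCKER_CONFIG, then ~/.docker/config.json, then the legacy ~/.dockercfg. A small end-to-end sketch of the environment-variable branch (stdlib plus the module under test; paths are temporary):

    import json
    import os
    import tempfile

    from docker.utils import config

    folder = tempfile.mkdtemp()
    path = os.path.join(folder, 'config.json')
    with open(path, 'w') as f:
        json.dump({'detachKeys': 'ctrl-q'}, f)

    os.environ['DOCKER_CONFIG'] = folder
    assert config.find_config_file() == path
    assert config.load_general_config() == {'detachKeys': 'ctrl-q'}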
diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py
new file mode 100644
index 0000000..f7aefd0
--- /dev/null
+++ b/tests/unit/utils_json_stream_test.py
@@ -0,0 +1,62 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
+
+
+class TestJsonSplitter(object):
+
+ def test_json_splitter_no_object(self):
+ data = '{"foo": "bar'
+ assert json_splitter(data) is None
+
+ def test_json_splitter_with_object(self):
+ data = '{"foo": "bar"}\n \n{"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
+class TestStreamAsText(object):
+
+ def test_stream_with_non_utf_unicode_character(self):
+ stream = [b'\xed\xf3\xf3']
+ output, = stream_as_text(stream)
+ assert output == '���'
+
+ def test_stream_with_utf_character(self):
+ stream = ['ěĝ'.encode('utf-8')]
+ output, = stream_as_text(stream)
+ assert output == 'ěĝ'
+
+
+class TestJsonStream(object):
+
+ def test_with_falsy_entries(self):
+ stream = [
+ '{"one": "two"}\n{}\n',
+ "[1, 2, 3]\n[]\n",
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {},
+ [1, 2, 3],
+ [],
+ ]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
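The splitter contract these tests encode, restated as a runnable sketch against the same module (inputs are illustrative): json_splitter returns None until one complete object is buffered, then the parsed object plus the whitespace-stripped remainder, and json_stream applies that repeatedly across chunks.

    from docker.utils.json_stream import json_splitter, json_stream

    assert json_splitter('{"a": 1') is None
    assert json_splitter('{"a": 1} {"b"') == ({'a': 1}, '{"b"')
    assert list(json_stream(['{"a": 1}\n', '{"b": 2}\n'])) == [{'a': 1}, {'b': 2}]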
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 290874f..00456e8 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -5,30 +5,27 @@ import json
import os
import os.path
import shutil
+import socket
import sys
import tarfile
import tempfile
+import unittest
import pytest
import six
-from docker.client import Client
-from docker.constants import (
- DEFAULT_DOCKER_API_VERSION, IS_WINDOWS_PLATFORM
-)
-from docker.errors import DockerException, InvalidVersion
+from docker.api.client import APIClient
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
- create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
- exclude_paths, convert_volume_binds, decode_json_header, tar,
- split_command, create_ipam_config, create_ipam_pool, parse_devices,
- update_headers
+ parse_bytes, parse_env_file, exclude_paths, convert_volume_binds,
+ decode_json_header, tar, split_command, parse_devices, update_headers,
)
from docker.utils.ports import build_port_bindings, split_port
-from docker.utils.utils import create_endpoint_config, format_environment
+from docker.utils.utils import format_environment
-from .. import base
from ..helpers import make_tree
@@ -38,7 +35,7 @@ TEST_CERT_DIR = os.path.join(
)
-class DecoratorsTest(base.BaseTestCase):
+class DecoratorsTest(unittest.TestCase):
def test_update_headers(self):
sample_headers = {
'X-Docker-Locale': 'en-US',
@@ -47,8 +44,8 @@ class DecoratorsTest(base.BaseTestCase):
def f(self, headers=None):
return headers
- client = Client()
- client._auth_configs = {}
+ client = APIClient()
+ client._general_configs = {}
g = update_headers(f)
assert g(client, headers=None) is None
@@ -57,7 +54,7 @@ class DecoratorsTest(base.BaseTestCase):
'Content-type': 'application/json',
}
- client._auth_configs = {
+ client._general_configs = {
'HttpHeaders': sample_headers
}
@@ -69,204 +66,7 @@ class DecoratorsTest(base.BaseTestCase):
}
-class HostConfigTest(base.BaseTestCase):
- def test_create_host_config_no_options(self):
- config = create_host_config(version='1.19')
- self.assertFalse('NetworkMode' in config)
-
- def test_create_host_config_no_options_newer_api_version(self):
- config = create_host_config(version='1.20')
- self.assertEqual(config['NetworkMode'], 'default')
-
- def test_create_host_config_invalid_cpu_cfs_types(self):
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota='0')
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period='0')
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota=23.11)
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period=1999.0)
-
- def test_create_host_config_with_cpu_quota(self):
- config = create_host_config(version='1.20', cpu_quota=1999)
- self.assertEqual(config.get('CpuQuota'), 1999)
-
- def test_create_host_config_with_cpu_period(self):
- config = create_host_config(version='1.20', cpu_period=1999)
- self.assertEqual(config.get('CpuPeriod'), 1999)
-
- def test_create_host_config_with_blkio_constraints(self):
- blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
- config = create_host_config(version='1.22',
- blkio_weight=1999,
- blkio_weight_device=blkio_rate,
- device_read_bps=blkio_rate,
- device_write_bps=blkio_rate,
- device_read_iops=blkio_rate,
- device_write_iops=blkio_rate)
-
- self.assertEqual(config.get('BlkioWeight'), 1999)
- self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
- self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
- self.assertEqual(blkio_rate[0]['Rate'], 1000)
-
- def test_create_host_config_with_shm_size(self):
- config = create_host_config(version='1.22', shm_size=67108864)
- self.assertEqual(config.get('ShmSize'), 67108864)
-
- def test_create_host_config_with_shm_size_in_mb(self):
- config = create_host_config(version='1.22', shm_size='64M')
- self.assertEqual(config.get('ShmSize'), 67108864)
-
- def test_create_host_config_with_oom_kill_disable(self):
- config = create_host_config(version='1.20', oom_kill_disable=True)
- self.assertEqual(config.get('OomKillDisable'), True)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.18.3',
- oom_kill_disable=True))
-
- def test_create_host_config_with_userns_mode(self):
- config = create_host_config(version='1.23', userns_mode='host')
- self.assertEqual(config.get('UsernsMode'), 'host')
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.22',
- userns_mode='host'))
- self.assertRaises(
- ValueError, lambda: create_host_config(version='1.23',
- userns_mode='host12'))
-
- def test_create_host_config_with_oom_score_adj(self):
- config = create_host_config(version='1.22', oom_score_adj=100)
- self.assertEqual(config.get('OomScoreAdj'), 100)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.21',
- oom_score_adj=100))
- self.assertRaises(
- TypeError, lambda: create_host_config(version='1.22',
- oom_score_adj='100'))
-
- def test_create_host_config_with_dns_opt(self):
-
- tested_opts = ['use-vc', 'no-tld-query']
- config = create_host_config(version='1.21', dns_opt=tested_opts)
- dns_opts = config.get('DnsOptions')
-
- self.assertTrue('use-vc' in dns_opts)
- self.assertTrue('no-tld-query' in dns_opts)
-
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.20',
- dns_opt=tested_opts))
-
- def test_create_endpoint_config_with_aliases(self):
- config = create_endpoint_config(version='1.22', aliases=['foo', 'bar'])
- assert config == {'Aliases': ['foo', 'bar']}
-
- with pytest.raises(InvalidVersion):
- create_endpoint_config(version='1.21', aliases=['foo', 'bar'])
-
- def test_create_host_config_with_mem_reservation(self):
- config = create_host_config(version='1.21', mem_reservation=67108864)
- self.assertEqual(config.get('MemoryReservation'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', mem_reservation=67108864))
-
- def test_create_host_config_with_kernel_memory(self):
- config = create_host_config(version='1.21', kernel_memory=67108864)
- self.assertEqual(config.get('KernelMemory'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', kernel_memory=67108864))
-
- def test_create_host_config_with_pids_limit(self):
- config = create_host_config(version='1.23', pids_limit=1024)
- self.assertEqual(config.get('PidsLimit'), 1024)
-
- with pytest.raises(InvalidVersion):
- create_host_config(version='1.22', pids_limit=1024)
- with pytest.raises(TypeError):
- create_host_config(version='1.22', pids_limit='1024')
-
-
-class UlimitTest(base.BaseTestCase):
- def test_create_host_config_dict_ulimit(self):
- ulimit_dct = {'name': 'nofile', 'soft': 8096}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_dict_ulimit_capitals(self):
- ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
- self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_obj_ulimit(self):
- ulimit_dct = Ulimit(name='nofile', soft=8096)
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj, ulimit_dct)
-
- def test_ulimit_invalid_type(self):
- self.assertRaises(ValueError, lambda: Ulimit(name=None))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
-
-
-class LogConfigTest(base.BaseTestCase):
- def test_create_host_config_dict_logconfig(self):
- dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=dct
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(dct['type'], config['LogConfig'].type)
-
- def test_create_host_config_obj_logconfig(self):
- obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=obj
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(obj, config['LogConfig'])
-
- def test_logconfig_invalid_config_type(self):
- with pytest.raises(ValueError):
- LogConfig(type=LogConfig.types.JSON, config='helloworld')
-
-
-class KwargsFromEnvTest(base.BaseTestCase):
+class KwargsFromEnvTest(unittest.TestCase):
def setUp(self):
self.os_environ = os.environ.copy()
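The host-config, ulimit, and log-config helpers exercised by the tests deleted above moved in 2.x+: create_host_config is now reached through the client (or built directly as docker.types.HostConfig). A minimal sketch under that assumption, with illustrative values:

    import docker

    client = docker.APIClient(version='1.24')  # no daemon contact at init
    hc = client.create_host_config(mem_reservation=67108864, pids_limit=1024)
    assert hc['MemoryReservation'] == 67108864
    assert hc['PidsLimit'] == 1024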
@@ -279,25 +79,25 @@ class KwargsFromEnvTest(base.BaseTestCase):
os.environ.pop('DOCKER_TLS_VERIFY', None)
kwargs = kwargs_from_env()
- self.assertEqual(None, kwargs.get('base_url'))
- self.assertEqual(None, kwargs.get('tls'))
+ assert kwargs.get('base_url') is None
+ assert kwargs.get('tls') is None
def test_kwargs_from_env_tls(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
- self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
- self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
- self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
- self.assertTrue('key.pem' in kwargs['tls'].cert[1])
- self.assertEqual(False, kwargs['tls'].assert_hostname)
- self.assertTrue(kwargs['tls'].verify)
+ assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is False
+ assert kwargs['tls'].verify
try:
- client = Client(**kwargs)
- self.assertEqual(kwargs['base_url'], client.base_url)
- self.assertEqual(kwargs['tls'].ca_cert, client.verify)
- self.assertEqual(kwargs['tls'].cert, client.cert)
+ client = APIClient(**kwargs)
+ assert kwargs['base_url'] == client.base_url
+ assert kwargs['tls'].ca_cert == client.verify
+ assert kwargs['tls'].cert == client.cert
except TypeError as e:
self.fail(e)
@@ -306,17 +106,17 @@ class KwargsFromEnvTest(base.BaseTestCase):
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env(assert_hostname=True)
- self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
- self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
- self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
- self.assertTrue('key.pem' in kwargs['tls'].cert[1])
- self.assertEqual(True, kwargs['tls'].assert_hostname)
- self.assertEqual(False, kwargs['tls'].verify)
+ assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is True
+ assert kwargs['tls'].verify is False
try:
- client = Client(**kwargs)
- self.assertEqual(kwargs['base_url'], client.base_url)
- self.assertEqual(kwargs['tls'].cert, client.cert)
- self.assertFalse(kwargs['tls'].verify)
+ client = APIClient(**kwargs)
+ assert kwargs['base_url'] == client.base_url
+ assert kwargs['tls'].cert == client.cert
+ assert not kwargs['tls'].verify
except TypeError as e:
self.fail(e)
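A sketch of the round trip these assertions cover: kwargs_from_env() translating DOCKER_* variables into APIClient arguments. It assumes the certificate directory actually contains ca.pem, cert.pem and key.pem, since TLSConfig validates the paths; all values are illustrative.

    import os

    import docker
    from docker.utils import kwargs_from_env

    os.environ.update(
        DOCKER_HOST='tcp://192.168.59.103:2376',
        DOCKER_CERT_PATH='/path/to/certs',  # hypothetical; must hold the pem files
        DOCKER_TLS_VERIFY='1',
    )
    kwargs = kwargs_from_env(assert_hostname=False)
    client = docker.APIClient(**kwargs)  # base_url becomes https://192.168.59.103:2376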
@@ -330,7 +130,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
DOCKER_TLS_VERIFY='')
os.environ.pop('DOCKER_CERT_PATH', None)
kwargs = kwargs_from_env(assert_hostname=True)
- self.assertEqual('tcp://192.168.59.103:2376', kwargs['base_url'])
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
def test_kwargs_from_env_no_cert_path(self):
try:
@@ -343,10 +143,10 @@ class KwargsFromEnvTest(base.BaseTestCase):
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env()
- self.assertTrue(kwargs['tls'].verify)
- self.assertIn(cert_dir, kwargs['tls'].ca_cert)
- self.assertIn(cert_dir, kwargs['tls'].cert[0])
- self.assertIn(cert_dir, kwargs['tls'].cert[1])
+ assert kwargs['tls'].verify
+ assert cert_dir in kwargs['tls'].ca_cert
+ assert cert_dir in kwargs['tls'].cert[0]
+ assert cert_dir in kwargs['tls'].cert[1]
finally:
if temp_dir:
shutil.rmtree(temp_dir)
@@ -366,14 +166,14 @@ class KwargsFromEnvTest(base.BaseTestCase):
assert 'tls' not in kwargs
-class ConverVolumeBindsTest(base.BaseTestCase):
+class ConvertVolumeBindsTest(unittest.TestCase):
def test_convert_volume_binds_empty(self):
- self.assertEqual(convert_volume_binds({}), [])
- self.assertEqual(convert_volume_binds([]), [])
+ assert convert_volume_binds({}) == []
+ assert convert_volume_binds([]) == []
def test_convert_volume_binds_list(self):
data = ['/a:/a:ro', '/b:/c:z']
- self.assertEqual(convert_volume_binds(data), data)
+ assert convert_volume_binds(data) == data
def test_convert_volume_binds_complete(self):
data = {
@@ -382,13 +182,13 @@ class ConverVolumeBindsTest(base.BaseTestCase):
'mode': 'ro'
}
}
- self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro'])
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:ro']
def test_convert_volume_binds_compact(self):
data = {
'/mnt/vol1': '/data'
}
- self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
def test_convert_volume_binds_no_mode(self):
data = {
@@ -396,7 +196,7 @@ class ConverVolumeBindsTest(base.BaseTestCase):
'bind': '/data'
}
}
- self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
def test_convert_volume_binds_unicode_bytes_input(self):
expected = [u'/mnt/지연:/unicode/박:rw']
@@ -407,9 +207,7 @@ class ConverVolumeBindsTest(base.BaseTestCase):
'mode': 'rw'
}
}
- self.assertEqual(
- convert_volume_binds(data), expected
- )
+ assert convert_volume_binds(data) == expected
def test_convert_volume_binds_unicode_unicode_input(self):
expected = [u'/mnt/지연:/unicode/박:rw']
@@ -420,12 +218,10 @@ class ConverVolumeBindsTest(base.BaseTestCase):
'mode': 'rw'
}
}
- self.assertEqual(
- convert_volume_binds(data), expected
- )
+ assert convert_volume_binds(data) == expected
-class ParseEnvFileTest(base.BaseTestCase):
+class ParseEnvFileTest(unittest.TestCase):
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
@@ -441,34 +237,39 @@ class ParseEnvFileTest(base.BaseTestCase):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
- self.assertEqual(get_parse_env_file,
- {'USER': 'jdoe', 'PASS': 'secret'})
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
os.unlink(env_file)
def test_parse_env_file_with_equals_character(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=sec==ret')
get_parse_env_file = parse_env_file(env_file)
- self.assertEqual(get_parse_env_file,
- {'USER': 'jdoe', 'PASS': 'sec==ret'})
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'sec==ret'}
os.unlink(env_file)
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
- get_parse_env_file = parse_env_file((env_file))
- self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_newline(self):
+ env_file = self.generate_tempfile(
+ file_content='\nUSER=jdoe\n\n\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
os.unlink(env_file)
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
- self.assertRaises(
- DockerException, parse_env_file, env_file)
+ with pytest.raises(DockerException):
+ parse_env_file(env_file)
os.unlink(env_file)
-class ParseHostTest(base.BaseTestCase):
+class ParseHostTest(unittest.TestCase):
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
@@ -530,88 +331,76 @@ class ParseHostTest(base.BaseTestCase):
assert parse_host(host_value) == expected_result
-class ParseRepositoryTagTest(base.BaseTestCase):
+class ParseRepositoryTagTest(unittest.TestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
def test_index_image_no_tag(self):
- self.assertEqual(
- parse_repository_tag("root"), ("root", None)
- )
+ assert parse_repository_tag("root") == ("root", None)
def test_index_image_tag(self):
- self.assertEqual(
- parse_repository_tag("root:tag"), ("root", "tag")
- )
+ assert parse_repository_tag("root:tag") == ("root", "tag")
def test_index_user_image_no_tag(self):
- self.assertEqual(
- parse_repository_tag("user/repo"), ("user/repo", None)
- )
+ assert parse_repository_tag("user/repo") == ("user/repo", None)
def test_index_user_image_tag(self):
- self.assertEqual(
- parse_repository_tag("user/repo:tag"), ("user/repo", "tag")
- )
+ assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag")
def test_private_reg_image_no_tag(self):
- self.assertEqual(
- parse_repository_tag("url:5000/repo"), ("url:5000/repo", None)
- )
+ assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", None)
def test_private_reg_image_tag(self):
- self.assertEqual(
- parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag")
+ assert parse_repository_tag("url:5000/repo:tag") == (
+ "url:5000/repo", "tag"
)
def test_index_image_sha(self):
- self.assertEqual(
- parse_repository_tag("root@sha256:{0}".format(self.sha)),
- ("root", "sha256:{0}".format(self.sha))
+ assert parse_repository_tag("root@sha256:{0}".format(self.sha)) == (
+ "root", "sha256:{0}".format(self.sha)
)
def test_private_reg_image_sha(self):
- self.assertEqual(
- parse_repository_tag("url:5000/repo@sha256:{0}".format(self.sha)),
- ("url:5000/repo", "sha256:{0}".format(self.sha))
- )
+ assert parse_repository_tag(
+ "url:5000/repo@sha256:{0}".format(self.sha)
+ ) == ("url:5000/repo", "sha256:{0}".format(self.sha))
-class ParseDeviceTest(base.BaseTestCase):
+class ParseDeviceTest(unittest.TestCase):
def test_dict(self):
devices = parse_devices([{
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
}])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
- })
+ }
def test_partial_string_definition(self):
devices = parse_devices(['/dev/sda1'])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/sda1',
'CgroupPermissions': 'rwm'
- })
+ }
def test_permissionless_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1'])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rwm'
- })
+ }
def test_full_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1:r'])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
- })
+ }
def test_hybrid_list(self):
devices = parse_devices([
@@ -623,39 +412,41 @@ class ParseDeviceTest(base.BaseTestCase):
}
])
- self.assertEqual(devices[0], {
+ assert devices[0] == {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rw'
- })
- self.assertEqual(devices[1], {
+ }
+ assert devices[1] == {
'PathOnHost': '/dev/sda2',
'PathInContainer': '/dev/mnt2',
'CgroupPermissions': 'r'
- })
+ }
-class ParseBytesTest(base.BaseTestCase):
+class ParseBytesTest(unittest.TestCase):
def test_parse_bytes_valid(self):
- self.assertEqual(parse_bytes("512MB"), 536870912)
- self.assertEqual(parse_bytes("512M"), 536870912)
- self.assertEqual(parse_bytes("512m"), 536870912)
+ assert parse_bytes("512MB") == 536870912
+ assert parse_bytes("512M") == 536870912
+ assert parse_bytes("512m") == 536870912
def test_parse_bytes_invalid(self):
- self.assertRaises(DockerException, parse_bytes, "512MK")
- self.assertRaises(DockerException, parse_bytes, "512L")
- self.assertRaises(DockerException, parse_bytes, "127.0.0.1K")
+ with pytest.raises(DockerException):
+ parse_bytes("512MK")
+ with pytest.raises(DockerException):
+ parse_bytes("512L")
+ with pytest.raises(DockerException):
+ parse_bytes("127.0.0.1K")
def test_parse_bytes_float(self):
- self.assertRaises(DockerException, parse_bytes, "1.5k")
+ with pytest.raises(DockerException):
+ parse_bytes("1.5k")
def test_parse_bytes_maxint(self):
- self.assertEqual(
- parse_bytes("{0}k".format(sys.maxsize)), sys.maxsize * 1024
- )
+ assert parse_bytes("{0}k".format(sys.maxsize)) == sys.maxsize * 1024
-class UtilsTest(base.BaseTestCase):
+class UtilsTest(unittest.TestCase):
longMessage = True
def test_convert_filters(self):
@@ -667,7 +458,7 @@ class UtilsTest(base.BaseTestCase):
]
for filters, expected in tests:
- self.assertEqual(convert_filters(filters), expected)
+ assert convert_filters(filters) == expected
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
@@ -677,151 +468,167 @@ class UtilsTest(base.BaseTestCase):
else:
data = base64.urlsafe_b64encode(json.dumps(obj))
decoded_data = decode_json_header(data)
- self.assertEqual(obj, decoded_data)
-
- def test_create_ipam_config(self):
- ipam_pool = create_ipam_pool(subnet='192.168.52.0/24',
- gateway='192.168.52.254')
-
- ipam_config = create_ipam_config(pool_configs=[ipam_pool])
- self.assertEqual(ipam_config, {
- 'Driver': 'default',
- 'Config': [{
- 'Subnet': '192.168.52.0/24',
- 'Gateway': '192.168.52.254',
- 'AuxiliaryAddresses': None,
- 'IPRange': None,
- }]
- })
+ assert obj == decoded_data
-class SplitCommandTest(base.BaseTestCase):
+class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
- self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
+ assert split_command(u'echo μμ') == ['echo', 'μμ']
@pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
def test_split_command_with_bytes(self):
- self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
+ assert split_command('echo μμ') == ['echo', 'μμ']
-class PortsTest(base.BaseTestCase):
+class PortsTest(unittest.TestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, [("127.0.0.1", "1000")])
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_protocol(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
- self.assertEqual(internal_port, ["2000/udp"])
- self.assertEqual(external_port, [("127.0.0.1", "1000")])
+ assert internal_port == ["2000/udp"]
+ assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, [("127.0.0.1", None)])
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", None)]
def test_split_port_range_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000-2001")
- self.assertEqual(internal_port, ["2000", "2001"])
- self.assertEqual(external_port,
- [("127.0.0.1", None), ("127.0.0.1", None)])
+ assert internal_port == ["2000", "2001"]
+ assert external_port == [("127.0.0.1", None), ("127.0.0.1", None)]
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, ["1000"])
+ assert internal_port == ["2000"]
+ assert external_port == ["1000"]
def test_split_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000-2001")
- self.assertEqual(internal_port, ["2000", "2001"])
- self.assertEqual(external_port, ["1000", "1001"])
+ assert internal_port == ["2000", "2001"]
+ assert external_port == ["1000", "1001"]
+
+ def test_split_port_random_port_range_with_host_port(self):
+ internal_port, external_port = split_port("1000-1001:2000")
+ assert internal_port == ["2000"]
+ assert external_port == ["1000-1001"]
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
- self.assertEqual(internal_port, ["2000"])
- self.assertEqual(external_port, None)
+ assert internal_port == ["2000"]
+ assert external_port is None
def test_split_port_range_no_host_port(self):
internal_port, external_port = split_port("2000-2001")
- self.assertEqual(internal_port, ["2000", "2001"])
- self.assertEqual(external_port, None)
+ assert internal_port == ["2000", "2001"]
+ assert external_port is None
def test_split_port_range_with_protocol(self):
internal_port, external_port = split_port(
"127.0.0.1:1000-1001:2000-2001/udp")
- self.assertEqual(internal_port, ["2000/udp", "2001/udp"])
- self.assertEqual(external_port,
- [("127.0.0.1", "1000"), ("127.0.0.1", "1001")])
+ assert internal_port == ["2000/udp", "2001/udp"]
+ assert external_port == [("127.0.0.1", "1000"), ("127.0.0.1", "1001")]
+
+ def test_split_port_with_ipv6_address(self):
+ internal_port, external_port = split_port(
+ "2001:abcd:ef00::2:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
def test_split_port_invalid(self):
- self.assertRaises(ValueError,
- lambda: split_port("0.0.0.0:1000:2000:tcp"))
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000:tcp")
def test_non_matching_length_port_ranges(self):
- self.assertRaises(
- ValueError,
- lambda: split_port("0.0.0.0:1000-1010:2000-2002/tcp")
- )
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000-1010:2000-2002/tcp")
def test_port_and_range_invalid(self):
- self.assertRaises(ValueError,
- lambda: split_port("0.0.0.0:1000:2000-2002/tcp"))
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000-2002/tcp")
def test_port_only_with_colon(self):
- self.assertRaises(ValueError,
- lambda: split_port(":80"))
+ with pytest.raises(ValueError):
+ split_port(":80")
def test_host_only_with_colon(self):
- self.assertRaises(ValueError,
- lambda: split_port("localhost:"))
+ with pytest.raises(ValueError):
+ split_port("localhost:")
+
+ def test_with_no_container_port(self):
+ with pytest.raises(ValueError):
+ split_port("localhost:80:")
+
+ def test_split_port_empty_string(self):
+ with pytest.raises(ValueError):
+ split_port("")
+
+ def test_split_port_non_string(self):
+ assert split_port(1243) == (['1243'], None)
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
- self.assertEqual(port_bindings["1000"],
- [("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
- self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
def test_build_port_bindings_with_port_range(self):
port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
- self.assertEqual(port_bindings["1001"], [("127.0.0.1", "1001")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["1001"] == [("127.0.0.1", "1001")]
def test_build_port_bindings_with_matching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
- self.assertEqual(port_bindings["1000"],
- [("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
- self.assertEqual(port_bindings["1001"],
- [("127.0.0.1", "1001"), ("127.0.0.1", "2001")])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
+ assert port_bindings["1001"] == [
+ ("127.0.0.1", "1001"), ("127.0.0.1", "2001")
+ ]
def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
- self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
- self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
def convert_paths(collection):
- if not IS_WINDOWS_PLATFORM:
- return collection
- return set(map(lambda x: x.replace('/', '\\'), collection))
+ return set(map(convert_path, collection))
-class ExcludePathsTest(base.BaseTestCase):
+def convert_path(path):
+ return path.replace('/', os.path.sep)
+
+
+class ExcludePathsTest(unittest.TestCase):
dirs = [
'foo',
'foo/bar',
'bar',
+ 'target',
+ 'target/subdir',
+ 'subdir',
+ 'subdir/target',
+ 'subdir/target/subdir',
+ 'subdir/subdir2',
+ 'subdir/subdir2/target',
+ 'subdir/subdir2/target/subdir'
]
files = [
@@ -837,6 +644,14 @@ class ExcludePathsTest(base.BaseTestCase):
'foo/bar/a.py',
'bar/a.py',
'foo/Dockerfile3',
+ 'target/file.txt',
+ 'target/subdir/file.txt',
+ 'subdir/file.txt',
+ 'subdir/target/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/file.txt',
+ 'subdir/subdir2/target/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
]
all_paths = set(dirs + files)
@@ -875,16 +690,23 @@ class ExcludePathsTest(base.BaseTestCase):
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
- assert self.exclude(['*'], dockerfile='Dockerfile.alt') == \
- set(['Dockerfile.alt', '.dockerignore'])
+ assert self.exclude(['*'], dockerfile='Dockerfile.alt') == set(
+ ['Dockerfile.alt', '.dockerignore']
+ )
- assert self.exclude(['*'], dockerfile='foo/Dockerfile3') == \
- set(['foo/Dockerfile3', '.dockerignore'])
+ assert self.exclude(
+ ['*'], dockerfile='foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+
+ # https://github.com/docker/docker-py/issues/1956
+ assert self.exclude(
+ ['*'], dockerfile='./foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
- assert 'foo/Dockerfile3' in includes
- assert 'foo/a.py' not in includes
+ assert convert_path('foo/Dockerfile3') in includes
+ assert convert_path('foo/a.py') not in includes
def test_single_filename(self):
assert self.exclude(['a.py']) == convert_paths(
@@ -935,6 +757,18 @@ class ExcludePathsTest(base.BaseTestCase):
self.all_paths - set(['foo/a.py'])
)
+ def test_single_subdir_single_filename_leading_slash(self):
+ assert self.exclude(['/foo/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_exclude_include_absolute_path(self):
+ base = make_tree([], ['a.py', 'b.py'])
+ assert exclude_paths(
+ base,
+ ['/*', '!/*.py']
+ ) == set(['a.py', 'b.py'])
+
def test_single_subdir_with_path_traversal(self):
assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
self.all_paths - set(['foo/a.py'])
@@ -986,6 +820,16 @@ class ExcludePathsTest(base.BaseTestCase):
])
)
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_directory_with_subdir_exception_win32_pathsep(self):
+ assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
self.all_paths - set([
@@ -998,8 +842,89 @@ class ExcludePathsTest(base.BaseTestCase):
self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
)
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_subdirectory_win32_pathsep(self):
+ assert self.exclude(['foo\\bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_double_wildcard(self):
+ assert self.exclude(['**/a.py']) == convert_paths(
+ self.all_paths - set(
+ ['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
+ )
+ )
-class TarTest(base.Cleanup, base.BaseTestCase):
+ assert self.exclude(['foo/**/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_single_and_double_wildcard(self):
+ assert self.exclude(['**/target/*/*']) == convert_paths(
+ self.all_paths - set(
+ ['target/subdir/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt']
+ )
+ )
+
+ def test_trailing_double_wildcard(self):
+ assert self.exclude(['subdir/**']) == convert_paths(
+ self.all_paths - set(
+ ['subdir/file.txt',
+ 'subdir/target/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/file.txt',
+ 'subdir/subdir2/target/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
+ 'subdir/target',
+ 'subdir/target/subdir',
+ 'subdir/subdir2',
+ 'subdir/subdir2/target',
+ 'subdir/subdir2/target/subdir']
+ )
+ )
+
+ def test_include_wildcard(self):
+ base = make_tree(['a'], ['a/b.py'])
+ assert exclude_paths(
+ base,
+ ['*', '!*/b.py']
+ ) == convert_paths(['a/b.py'])
+
+ def test_last_line_precedence(self):
+ base = make_tree(
+ [],
+ ['garbage.md',
+ 'thrash.md',
+ 'README.md',
+ 'README-bis.md',
+ 'README-secret.md'])
+ assert exclude_paths(
+ base,
+ ['*.md', '!README*.md', 'README-secret.md']
+ ) == set(['README.md', 'README-bis.md'])
+
+ def test_parent_directory(self):
+ base = make_tree(
+ [],
+ ['a.py',
+ 'b.py',
+ 'c.py'])
+ # Dockerignore reference stipulates that absolute paths are
+ # equivalent to relative paths, hence /../foo should be
+ # equivalent to ../foo. It also stipulates that paths are run
+ # through Go's filepath.Clean, which explicitly "replace[s]
+ # '/..' by '/' at the beginning of a path".
+ assert exclude_paths(
+ base,
+ ['../a.py', '/../b.py']
+ ) == set(['c.py'])
+
+
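test_last_line_precedence above encodes the .dockerignore rule that later patterns override earlier ones. The same behaviour, rebuilt with the stdlib instead of the make_tree helper (temporary directory, illustrative file names):

    import os
    import tempfile

    from docker.utils import exclude_paths

    base = tempfile.mkdtemp()
    for name in ('garbage.md', 'README.md', 'README-secret.md'):
        open(os.path.join(base, name), 'w').close()

    kept = exclude_paths(base, ['*.md', '!README*.md', 'README-secret.md'])
    assert kept == {'README.md'}  # re-excluded README-secret.md stays out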
+class TarTest(unittest.TestCase):
def test_tar_with_excludes(self):
dirs = [
'foo',
@@ -1053,7 +978,25 @@ class TarTest(base.Cleanup, base.BaseTestCase):
os.makedirs(os.path.join(base, d))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(sorted(tar_data.getnames()), ['bar', 'foo'])
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM or os.geteuid() == 0,
reason='root user always has access; no chmod on Windows'
+ )
+ def test_tar_with_inaccessible_file(self):
+ base = tempfile.mkdtemp()
+ full_path = os.path.join(base, 'foo')
+ self.addCleanup(shutil.rmtree, base)
+ with open(full_path, 'w') as f:
+ f.write('content')
+ os.chmod(full_path, 0o222)
+ with pytest.raises(IOError) as ei:
+ tar(base)
+
+ assert 'Can not read file in context: {}'.format(full_path) in (
+ ei.exconly()
+ )
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
def test_tar_with_file_symlinks(self):
@@ -1065,9 +1008,7 @@ class TarTest(base.Cleanup, base.BaseTestCase):
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(
- sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
- )
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
def test_tar_with_directory_symlinks(self):
@@ -1078,12 +1019,62 @@ class TarTest(base.Cleanup, base.BaseTestCase):
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
- self.assertEqual(
- sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
- )
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_broken_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+
+ os.symlink('../baz', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32')
+ def test_tar_socket_file(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ sock = socket.socket(socket.AF_UNIX)
+ self.addCleanup(sock.close)
+ sock.bind(os.path.join(base, 'test.sock'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+ def test_tar_negative_mtime_bug(self):
+ base = tempfile.mkdtemp()
+ filename = os.path.join(base, 'th.txt')
+ self.addCleanup(shutil.rmtree, base)
+ with open(filename, 'w') as f:
+ f.write('Invisible Full Moon')
+ os.utime(filename, (12345, -3600.0))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert tar_data.getnames() == ['th.txt']
+ assert tar_data.getmember('th.txt').mtime == -3600
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_directory_link(self):
+ dirs = ['a', 'b', 'a/c']
+ files = ['a/hello.py', 'b/utils.py', 'a/c/descend.py']
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+ os.symlink(os.path.join(base, 'b'), os.path.join(base, 'a/c/b'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ names = tar_data.getnames()
+ for member in dirs + files:
+ assert member in names
+ assert 'a/c/b' in names
+ assert 'a/c/b/utils.py' not in names
-class FormatEnvironmentTest(base.BaseTestCase):
+class FormatEnvironmentTest(unittest.TestCase):
def test_format_env_binary_unicode_value(self):
env_dict = {
'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'