author     Felipe Sateler <fsateler@debian.org>  2018-09-05 20:08:46 -0300
committer  Felipe Sateler <fsateler@debian.org>  2018-09-05 20:08:46 -0300
commit     e1cbc65fa82eeef6f4b76dcc360416959928dfef (patch)
tree       9f2131841870ebe4b43b61c9c65ec545178ceba8
Import python-docker_3.4.1.orig.tar.gz
[dgit import orig python-docker_3.4.1.orig.tar.gz]
-rw-r--r--  LICENSE  191
-rw-r--r--  MANIFEST.in  8
-rw-r--r--  PKG-INFO  118
-rw-r--r--  README.md  77
-rw-r--r--  README.rst  94
-rw-r--r--  docker.egg-info/PKG-INFO  118
-rw-r--r--  docker.egg-info/SOURCES.txt  129
-rw-r--r--  docker.egg-info/dependency_links.txt  1
-rw-r--r--  docker.egg-info/not-zip-safe  1
-rw-r--r--  docker.egg-info/requires.txt  21
-rw-r--r--  docker.egg-info/top_level.txt  1
-rw-r--r--  docker/__init__.py  7
-rw-r--r--  docker/api/__init__.py  2
-rw-r--r--  docker/api/build.py  350
-rw-r--r--  docker/api/client.py  460
-rw-r--r--  docker/api/config.py  91
-rw-r--r--  docker/api/container.py  1254
-rw-r--r--  docker/api/daemon.py  181
-rw-r--r--  docker/api/exec_api.py  165
-rw-r--r--  docker/api/image.py  562
-rw-r--r--  docker/api/network.py  272
-rw-r--r--  docker/api/plugin.py  262
-rw-r--r--  docker/api/secret.py  102
-rw-r--r--  docker/api/service.py  444
-rw-r--r--  docker/api/swarm.py  389
-rw-r--r--  docker/api/volume.py  161
-rw-r--r--  docker/auth.py  303
-rw-r--r--  docker/client.py  208
-rw-r--r--  docker/constants.py  20
-rw-r--r--  docker/errors.py  162
-rw-r--r--  docker/models/__init__.py  0
-rw-r--r--  docker/models/configs.py  69
-rw-r--r--  docker/models/containers.py  1068
-rw-r--r--  docker/models/images.py  447
-rw-r--r--  docker/models/networks.py  215
-rw-r--r--  docker/models/nodes.py  107
-rw-r--r--  docker/models/plugins.py  200
-rw-r--r--  docker/models/resource.py  93
-rw-r--r--  docker/models/secrets.py  69
-rw-r--r--  docker/models/services.py  352
-rw-r--r--  docker/models/swarm.py  168
-rw-r--r--  docker/models/volumes.py  99
-rw-r--r--  docker/tls.py  112
-rw-r--r--  docker/transport/__init__.py  8
-rw-r--r--  docker/transport/npipeconn.py  108
-rw-r--r--  docker/transport/npipesocket.py  219
-rw-r--r--  docker/transport/ssladapter.py  71
-rw-r--r--  docker/transport/unixconn.py  112
-rw-r--r--  docker/types/__init__.py  11
-rw-r--r--  docker/types/base.py  7
-rw-r--r--  docker/types/containers.py  598
-rw-r--r--  docker/types/daemon.py  64
-rw-r--r--  docker/types/healthcheck.py  88
-rw-r--r--  docker/types/networks.py  111
-rw-r--r--  docker/types/services.py  715
-rw-r--r--  docker/types/swarm.py  119
-rw-r--r--  docker/utils/__init__.py  13
-rw-r--r--  docker/utils/build.py  255
-rw-r--r--  docker/utils/config.py  66
-rw-r--r--  docker/utils/decorators.py  47
-rw-r--r--  docker/utils/fnmatch.py  115
-rw-r--r--  docker/utils/json_stream.py  80
-rw-r--r--  docker/utils/ports.py  83
-rw-r--r--  docker/utils/socket.py  100
-rw-r--r--  docker/utils/utils.py  489
-rw-r--r--  docker/version.py  2
-rw-r--r--  requirements.txt  18
-rw-r--r--  setup.cfg  11
-rw-r--r--  setup.py  89
-rw-r--r--  test-requirements.txt  6
-rw-r--r--  tests/__init__.py  0
-rw-r--r--  tests/helpers.py  138
-rw-r--r--  tests/integration/__init__.py  0
-rw-r--r--  tests/integration/api_build_test.py  546
-rw-r--r--  tests/integration/api_client_test.py  77
-rw-r--r--  tests/integration/api_config_test.py  72
-rw-r--r--  tests/integration/api_container_test.py  1513
-rw-r--r--  tests/integration/api_exec_test.py  205
-rw-r--r--  tests/integration/api_healthcheck_test.py  68
-rw-r--r--  tests/integration/api_image_test.py  368
-rw-r--r--  tests/integration/api_network_test.py  474
-rw-r--r--  tests/integration/api_plugin_test.py  145
-rw-r--r--  tests/integration/api_secret_test.py  72
-rw-r--r--  tests/integration/api_service_test.py  1255
-rw-r--r--  tests/integration/api_swarm_test.py  207
-rw-r--r--  tests/integration/api_volume_test.py  69
-rw-r--r--  tests/integration/base.py  125
-rw-r--r--  tests/integration/client_test.py  49
-rw-r--r--  tests/integration/conftest.py  29
-rw-r--r--  tests/integration/errors_test.py  15
-rw-r--r--  tests/integration/models_containers_test.py  364
-rw-r--r--  tests/integration/models_images_test.py  136
-rw-r--r--  tests/integration/models_networks_test.py  70
-rw-r--r--  tests/integration/models_nodes_test.py  37
-rw-r--r--  tests/integration/models_resources_test.py  16
-rw-r--r--  tests/integration/models_services_test.py  335
-rw-r--r--  tests/integration/models_swarm_test.py  33
-rw-r--r--  tests/integration/models_volumes_test.py  30
-rw-r--r--  tests/integration/regression_test.py  65
-rw-r--r--  tests/integration/testdata/dummy-plugin/config.json  19
-rw-r--r--  tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt  0
-rw-r--r--  tests/unit/__init__.py  0
-rw-r--r--  tests/unit/api_build_test.py  163
-rw-r--r--  tests/unit/api_container_test.py  1448
-rw-r--r--  tests/unit/api_exec_test.py  83
-rw-r--r--  tests/unit/api_image_test.py  357
-rw-r--r--  tests/unit/api_network_test.py  169
-rw-r--r--  tests/unit/api_test.py  593
-rw-r--r--  tests/unit/api_volume_test.py  115
-rw-r--r--  tests/unit/auth_test.py  506
-rw-r--r--  tests/unit/client_test.py  112
-rw-r--r--  tests/unit/dockertypes_test.py  470
-rw-r--r--  tests/unit/errors_test.py  133
-rw-r--r--  tests/unit/fake_api.py  645
-rw-r--r--  tests/unit/fake_api_client.py  67
-rw-r--r--  tests/unit/fake_stat.py  133
-rw-r--r--  tests/unit/models_containers_test.py  550
-rw-r--r--  tests/unit/models_images_test.py  128
-rw-r--r--  tests/unit/models_networks_test.py  64
-rw-r--r--  tests/unit/models_resources_test.py  28
-rw-r--r--  tests/unit/models_services_test.py  53
-rw-r--r--  tests/unit/ssladapter_test.py  78
-rw-r--r--  tests/unit/swarm_test.py  71
-rw-r--r--  tests/unit/testdata/certs/ca.pem  0
-rw-r--r--  tests/unit/testdata/certs/cert.pem  0
-rw-r--r--  tests/unit/testdata/certs/key.pem  0
-rw-r--r--  tests/unit/utils_build_test.py  493
-rw-r--r--  tests/unit/utils_config_test.py  123
-rw-r--r--  tests/unit/utils_json_stream_test.py  62
-rw-r--r--  tests/unit/utils_test.py  619
130 files changed, 26553 insertions, 0 deletions
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..75191a4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..41b3fa9
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,8 @@
+include test-requirements.txt
+include requirements.txt
+include README.md
+include README.rst
+include LICENSE
+recursive-include tests *.py
+recursive-include tests/unit/testdata *
+recursive-include tests/integration/testdata *
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..d8c4409
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,118 @@
+Metadata-Version: 1.1
+Name: docker
+Version: 3.4.1
+Summary: A Python library for the Docker Engine API.
+Home-page: https://github.com/docker/docker-py
+Author: Joffrey F
+Author-email: joffrey@docker.com
+License: Apache License 2.0
+Description: Docker SDK for Python
+ =====================
+
+ |Build Status|
+
+ A Python library for the Docker Engine API. It lets you do anything the
+ ``docker`` command does, but from within Python apps – run containers,
+ manage containers, manage Swarms, etc.
+
+ Installation
+ ------------
+
+ The latest stable version `is available on
+ PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+ your ``requirements.txt`` file or install with pip:
+
+ ::
+
+ pip install docker
+
+ If you are intending to connect to a docker host via TLS, add
+ ``docker[tls]`` to your requirements instead, or install with pip:
+
+ ::
+
+ pip install docker[tls]
+
+ Usage
+ -----
+
+ Connect to Docker using the default socket or the configuration in your
+ environment:
+
+ .. code:: python
+
+ import docker
+ client = docker.from_env()
+
+ You can run containers:
+
+ .. code:: python
+
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+
+ You can run containers in the background:
+
+ .. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+ You can manage containers:
+
+ .. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+ You can stream logs:
+
+ .. code:: python
+
+ >>> for line in container.logs(stream=True):
+            ... print(line.strip())
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+ You can manage images:
+
+ .. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+ `Read the full documentation <https://docker-py.readthedocs.io>`__ to
+ see everything you can do.
+
+ .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
+ :target: https://travis-ci.org/docker/docker-py
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Utilities
+Classifier: License :: OSI Approved :: Apache Software License
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3ff124d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,77 @@
+# Docker SDK for Python
+
+[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
+
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
+
+## Installation
+
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
+
+ pip install docker
+
+If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
+
+ pip install docker[tls]
+
+## Usage
+
+Connect to Docker using the default socket or the configuration in your environment:
+
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu:latest", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+... print(line.strip())
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
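The `docker[tls]` extra mentioned in this README pulls in pyOpenSSL, cryptography, and idna (see `requires.txt` below); a minimal sketch of what a TLS connection then looks like, where the certificate paths and daemon address are placeholder assumptions:

```python
import docker
from docker.tls import TLSConfig

# Hypothetical paths: point these at your own daemon's certificates.
tls_config = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),  # client cert + key pair
    ca_cert='/certs/ca.pem',                            # CA that signed the daemon cert
    verify=True,                                        # verify the daemon's certificate
)
client = docker.DockerClient(base_url='tcp://127.0.0.1:2376', tls=tls_config)
print(client.ping())  # True if the TLS handshake and API call succeed
```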
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..d0117e6
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,94 @@
+Docker SDK for Python
+=====================
+
+|Build Status|
+
+A Python library for the Docker Engine API. It lets you do anything the
+``docker`` command does, but from within Python apps – run containers,
+manage containers, manage Swarms, etc.
+
+Installation
+------------
+
+The latest stable version `is available on
+PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+your ``requirements.txt`` file or install with pip:
+
+::
+
+ pip install docker
+
+If you are intending to connect to a docker host via TLS, add
+``docker[tls]`` to your requirements instead, or install with pip:
+
+::
+
+ pip install docker[tls]
+
+Usage
+-----
+
+Connect to Docker using the default socket or the configuration in your
+environment:
+
+.. code:: python
+
+ import docker
+ client = docker.from_env()
+
+You can run containers:
+
+.. code:: python
+
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+
+You can run containers in the background:
+
+.. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+You can manage containers:
+
+.. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+You can stream logs:
+
+.. code:: python
+
+ >>> for line in container.logs(stream=True):
+    ... print(line.strip())
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+You can manage images:
+
+.. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+`Read the full documentation <https://docker-py.readthedocs.io>`__ to
+see everything you can do.
+
+.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
+ :target: https://travis-ci.org/docker/docker-py
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
new file mode 100644
index 0000000..d8c4409
--- /dev/null
+++ b/docker.egg-info/PKG-INFO
@@ -0,0 +1,118 @@
+Metadata-Version: 1.1
+Name: docker
+Version: 3.4.1
+Summary: A Python library for the Docker Engine API.
+Home-page: https://github.com/docker/docker-py
+Author: Joffrey F
+Author-email: joffrey@docker.com
+License: Apache License 2.0
+Description: Docker SDK for Python
+ =====================
+
+ |Build Status|
+
+ A Python library for the Docker Engine API. It lets you do anything the
+ ``docker`` command does, but from within Python apps – run containers,
+ manage containers, manage Swarms, etc.
+
+ Installation
+ ------------
+
+ The latest stable version `is available on
+ PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+ your ``requirements.txt`` file or install with pip:
+
+ ::
+
+ pip install docker
+
+ If you are intending to connect to a docker host via TLS, add
+ ``docker[tls]`` to your requirements instead, or install with pip:
+
+ ::
+
+ pip install docker[tls]
+
+ Usage
+ -----
+
+ Connect to Docker using the default socket or the configuration in your
+ environment:
+
+ .. code:: python
+
+ import docker
+ client = docker.from_env()
+
+ You can run containers:
+
+ .. code:: python
+
+ >>> client.containers.run("ubuntu:latest", "echo hello world")
+ 'hello world\n'
+
+ You can run containers in the background:
+
+ .. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+ You can manage containers:
+
+ .. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+ You can stream logs:
+
+ .. code:: python
+
+ >>> for line in container.logs(stream=True):
+            ... print(line.strip())
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+ You can manage images:
+
+ .. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+ `Read the full documentation <https://docker-py.readthedocs.io>`__ to
+ see everything you can do.
+
+ .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
+ :target: https://travis-ci.org/docker/docker-py
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Utilities
+Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
new file mode 100644
index 0000000..02d2f19
--- /dev/null
+++ b/docker.egg-info/SOURCES.txt
@@ -0,0 +1,129 @@
+LICENSE
+MANIFEST.in
+README.md
+README.rst
+requirements.txt
+setup.cfg
+setup.py
+test-requirements.txt
+docker/__init__.py
+docker/auth.py
+docker/client.py
+docker/constants.py
+docker/errors.py
+docker/tls.py
+docker/version.py
+docker.egg-info/PKG-INFO
+docker.egg-info/SOURCES.txt
+docker.egg-info/dependency_links.txt
+docker.egg-info/not-zip-safe
+docker.egg-info/requires.txt
+docker.egg-info/top_level.txt
+docker/api/__init__.py
+docker/api/build.py
+docker/api/client.py
+docker/api/config.py
+docker/api/container.py
+docker/api/daemon.py
+docker/api/exec_api.py
+docker/api/image.py
+docker/api/network.py
+docker/api/plugin.py
+docker/api/secret.py
+docker/api/service.py
+docker/api/swarm.py
+docker/api/volume.py
+docker/models/__init__.py
+docker/models/configs.py
+docker/models/containers.py
+docker/models/images.py
+docker/models/networks.py
+docker/models/nodes.py
+docker/models/plugins.py
+docker/models/resource.py
+docker/models/secrets.py
+docker/models/services.py
+docker/models/swarm.py
+docker/models/volumes.py
+docker/transport/__init__.py
+docker/transport/npipeconn.py
+docker/transport/npipesocket.py
+docker/transport/ssladapter.py
+docker/transport/unixconn.py
+docker/types/__init__.py
+docker/types/base.py
+docker/types/containers.py
+docker/types/daemon.py
+docker/types/healthcheck.py
+docker/types/networks.py
+docker/types/services.py
+docker/types/swarm.py
+docker/utils/__init__.py
+docker/utils/build.py
+docker/utils/config.py
+docker/utils/decorators.py
+docker/utils/fnmatch.py
+docker/utils/json_stream.py
+docker/utils/ports.py
+docker/utils/socket.py
+docker/utils/utils.py
+tests/__init__.py
+tests/helpers.py
+tests/integration/__init__.py
+tests/integration/api_build_test.py
+tests/integration/api_client_test.py
+tests/integration/api_config_test.py
+tests/integration/api_container_test.py
+tests/integration/api_exec_test.py
+tests/integration/api_healthcheck_test.py
+tests/integration/api_image_test.py
+tests/integration/api_network_test.py
+tests/integration/api_plugin_test.py
+tests/integration/api_secret_test.py
+tests/integration/api_service_test.py
+tests/integration/api_swarm_test.py
+tests/integration/api_volume_test.py
+tests/integration/base.py
+tests/integration/client_test.py
+tests/integration/conftest.py
+tests/integration/errors_test.py
+tests/integration/models_containers_test.py
+tests/integration/models_images_test.py
+tests/integration/models_networks_test.py
+tests/integration/models_nodes_test.py
+tests/integration/models_resources_test.py
+tests/integration/models_services_test.py
+tests/integration/models_swarm_test.py
+tests/integration/models_volumes_test.py
+tests/integration/regression_test.py
+tests/integration/testdata/dummy-plugin/config.json
+tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
+tests/unit/__init__.py
+tests/unit/api_build_test.py
+tests/unit/api_container_test.py
+tests/unit/api_exec_test.py
+tests/unit/api_image_test.py
+tests/unit/api_network_test.py
+tests/unit/api_test.py
+tests/unit/api_volume_test.py
+tests/unit/auth_test.py
+tests/unit/client_test.py
+tests/unit/dockertypes_test.py
+tests/unit/errors_test.py
+tests/unit/fake_api.py
+tests/unit/fake_api_client.py
+tests/unit/fake_stat.py
+tests/unit/models_containers_test.py
+tests/unit/models_images_test.py
+tests/unit/models_networks_test.py
+tests/unit/models_resources_test.py
+tests/unit/models_services_test.py
+tests/unit/ssladapter_test.py
+tests/unit/swarm_test.py
+tests/unit/utils_build_test.py
+tests/unit/utils_config_test.py
+tests/unit/utils_json_stream_test.py
+tests/unit/utils_test.py
+tests/unit/testdata/certs/ca.pem
+tests/unit/testdata/certs/cert.pem
+tests/unit/testdata/certs/key.pem
\ No newline at end of file
diff --git a/docker.egg-info/dependency_links.txt b/docker.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/docker.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/docker.egg-info/not-zip-safe b/docker.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/docker.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt
new file mode 100644
index 0000000..e0b0763
--- /dev/null
+++ b/docker.egg-info/requires.txt
@@ -0,0 +1,21 @@
+requests!=2.18.0,>=2.14.2
+six>=1.4.0
+websocket-client>=0.32.0
+docker-pycreds>=0.3.0
+
+[:python_version < "3.3"]
+ipaddress>=1.0.16
+
+[:python_version < "3.5"]
+backports.ssl_match_hostname>=3.5
+
+[:sys_platform == "win32" and python_version < "3.6"]
+pypiwin32==219
+
+[:sys_platform == "win32" and python_version >= "3.6"]
+pypiwin32==220
+
+[tls]
+pyOpenSSL>=0.14
+cryptography>=1.3.4
+idna>=2.0.0
diff --git a/docker.egg-info/top_level.txt b/docker.egg-info/top_level.txt
new file mode 100644
index 0000000..bdb9670
--- /dev/null
+++ b/docker.egg-info/top_level.txt
@@ -0,0 +1 @@
+docker
diff --git a/docker/__init__.py b/docker/__init__.py
new file mode 100644
index 0000000..cf732e1
--- /dev/null
+++ b/docker/__init__.py
@@ -0,0 +1,7 @@
+# flake8: noqa
+from .api import APIClient
+from .client import DockerClient, from_env
+from .version import version, version_info
+
+__version__ = version
+__title__ = 'docker'
diff --git a/docker/api/__init__.py b/docker/api/__init__.py
new file mode 100644
index 0000000..ff51844
--- /dev/null
+++ b/docker/api/__init__.py
@@ -0,0 +1,2 @@
+# flake8: noqa
+from .client import APIClient
diff --git a/docker/api/build.py b/docker/api/build.py
new file mode 100644
index 0000000..419255f
--- /dev/null
+++ b/docker/api/build.py
@@ -0,0 +1,350 @@
+import json
+import logging
+import os
+import random
+
+from .. import auth
+from .. import constants
+from .. import errors
+from .. import utils
+
+
+log = logging.getLogger(__name__)
+
+
+class BuildApiMixin(object):
+ def build(self, path=None, tag=None, quiet=False, fileobj=None,
+ nocache=False, rm=False, timeout=None,
+ custom_context=False, encoding=None, pull=False,
+ forcerm=False, dockerfile=None, container_limits=None,
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None, cache_from=None, target=None, network_mode=None,
+ squash=None, extra_hosts=None, platform=None, isolation=None):
+ """
+ Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
+ needs to be set. ``path`` can be a local path (to a directory
+ containing a Dockerfile) or a remote URL. ``fileobj`` must be a
+ readable file-like object to a Dockerfile.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is compressed
+        also, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ Example:
+ >>> from io import BytesIO
+ >>> from docker import APIClient
+ >>> dockerfile = '''
+ ... # Shared Volume
+ ... FROM busybox:buildroot-2014.02
+ ... VOLUME /data
+ ... CMD ["/bin/sh"]
+ ... '''
+ >>> f = BytesIO(dockerfile.encode('utf-8'))
+ >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
+ >>> response = [line for line in cli.build(
+ ... fileobj=f, rm=True, tag='yourname/volume'
+ ... )]
+ >>> response
+ ['{"stream":" ---\\u003e a9eb17255234\\n"}',
+ '{"stream":"Step 1 : VOLUME /data\\n"}',
+ '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
+ '{"stream":" ---\\u003e 713bca62012e\\n"}',
+ '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
+ '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
+ '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
+ '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
+ '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
+ '{"stream":"Successfully built 032b8b2855fc\\n"}']
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (:py:class:`list`): A list of images used for build
+ cache resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+
+ Returns:
+ A generator for the build output.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
+ """
+ remote = context = None
+ headers = {}
+ container_limits = container_limits or {}
+ if path is None and fileobj is None:
+ raise TypeError("Either path or fileobj needs to be provided.")
+ if gzip and encoding is not None:
+ raise errors.DockerException(
+ 'Can not use custom encoding if gzip is enabled'
+ )
+
+ for key in container_limits.keys():
+ if key not in constants.CONTAINER_LIMITS_KEYS:
+ raise errors.DockerException(
+ 'Invalid container_limits key {0}'.format(key)
+ )
+
+ if custom_context:
+ if not fileobj:
+ raise TypeError("You must specify fileobj with custom_context")
+ context = fileobj
+ elif fileobj is not None:
+ context = utils.mkbuildcontext(fileobj)
+ elif path.startswith(('http://', 'https://',
+ 'git://', 'github.com/', 'git@')):
+ remote = path
+ elif not os.path.isdir(path):
+ raise TypeError("You must specify a directory to build in path")
+ else:
+ dockerignore = os.path.join(path, '.dockerignore')
+ exclude = None
+ if os.path.exists(dockerignore):
+ with open(dockerignore, 'r') as f:
+ exclude = list(filter(
+ lambda x: x != '' and x[0] != '#',
+ [l.strip() for l in f.read().splitlines()]
+ ))
+ dockerfile = process_dockerfile(dockerfile, path)
+ context = utils.tar(
+ path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
+ )
+ encoding = 'gzip' if gzip else encoding
+
+ u = self._url('/build')
+ params = {
+ 't': tag,
+ 'remote': remote,
+ 'q': quiet,
+ 'nocache': nocache,
+ 'rm': rm,
+ 'forcerm': forcerm,
+ 'pull': pull,
+ 'dockerfile': dockerfile,
+ }
+ params.update(container_limits)
+
+ if buildargs:
+ params.update({'buildargs': json.dumps(buildargs)})
+
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
+ if cache_from:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'cachefrom': json.dumps(cache_from)})
+ else:
+ raise errors.InvalidVersion(
+ 'cache_from was only introduced in API version 1.25'
+ )
+
+ if target:
+ if utils.version_gte(self._version, '1.29'):
+ params.update({'target': target})
+ else:
+ raise errors.InvalidVersion(
+ 'target was only introduced in API version 1.29'
+ )
+
+ if network_mode:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'networkmode': network_mode})
+ else:
+ raise errors.InvalidVersion(
+ 'network_mode was only introduced in API version 1.25'
+ )
+
+ if squash:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'squash': squash})
+ else:
+ raise errors.InvalidVersion(
+ 'squash was only introduced in API version 1.25'
+ )
+
+ if extra_hosts is not None:
+ if utils.version_lt(self._version, '1.27'):
+ raise errors.InvalidVersion(
+ 'extra_hosts was only introduced in API version 1.27'
+ )
+
+ if isinstance(extra_hosts, dict):
+ extra_hosts = utils.format_extra_hosts(extra_hosts)
+ params.update({'extrahosts': extra_hosts})
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
+
+ if isolation is not None:
+ if utils.version_lt(self._version, '1.24'):
+ raise errors.InvalidVersion(
+ 'isolation was only introduced in API version 1.24'
+ )
+ params['isolation'] = isolation
+
+ if context is not None:
+ headers = {'Content-Type': 'application/tar'}
+ if encoding:
+ headers['Content-Encoding'] = encoding
+
+ self._set_auth_headers(headers)
+
+ response = self._post(
+ u,
+ data=context,
+ params=params,
+ headers=headers,
+ stream=True,
+ timeout=timeout,
+ )
+
+ if context is not None and not custom_context:
+ context.close()
+
+ return self._stream_helper(response, decode=decode)
+
+ @utils.minimum_version('1.31')
+ def prune_builds(self):
+ """
+ Delete the builder cache
+
+ Returns:
+ (dict): A dictionary containing information about the operation's
+ result. The ``SpaceReclaimed`` key indicates the amount of
+ bytes of disk space reclaimed.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/build/prune")
+ return self._result(self._post(url), True)
+
+ def _set_auth_headers(self, headers):
+ log.debug('Looking for auth config')
+
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs:
+ log.debug("No auth config in memory - loading from filesystem")
+ self._auth_configs = auth.load_config()
+
+ # Send the full auth configuration (if any exists), since the build
+ # could use any (or all) of the registries.
+ if self._auth_configs:
+ auth_data = {}
+ if self._auth_configs.get('credsStore'):
+ # Using a credentials store, we need to retrieve the
+ # credentials for each registry listed in the config.json file
+ # Matches CLI behavior: https://github.com/docker/docker/blob/
+ # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
+ # credentials/native_store.go#L68-L83
+ for registry in self._auth_configs.get('auths', {}).keys():
+ auth_data[registry] = auth.resolve_authconfig(
+ self._auth_configs, registry,
+ credstore_env=self.credstore_env,
+ )
+ else:
+ auth_data = self._auth_configs.get('auths', {}).copy()
+ # See https://github.com/docker/docker-py/issues/1683
+ if auth.INDEX_NAME in auth_data:
+ auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
+
+ log.debug(
+ 'Sending auth config ({0})'.format(
+ ', '.join(repr(k) for k in auth_data.keys())
+ )
+ )
+
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
+ else:
+ log.debug('No auth config found')
+
+
+def process_dockerfile(dockerfile, path):
+ if not dockerfile:
+ return (None, None)
+
+ abs_dockerfile = dockerfile
+ if not os.path.isabs(dockerfile):
+ abs_dockerfile = os.path.join(path, dockerfile)
+
+ if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+ os.path.relpath(abs_dockerfile, path).startswith('..')):
+ # Dockerfile not in context - read data to insert into tar later
+ with open(abs_dockerfile, 'r') as df:
+ return (
+ '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ df.read()
+ )
+
+ # Dockerfile is inside the context - return path relative to context root
+ if dockerfile == abs_dockerfile:
+ # Only calculate relpath if necessary to avoid errors
+ # on Windows client -> Linux Docker
+ # see https://github.com/docker/compose/issues/5969
+ dockerfile = os.path.relpath(abs_dockerfile, path)
+ return (dockerfile, None)
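For reference, a minimal sketch of driving the `build()` method above from an in-memory Dockerfile, using `decode=True` so each progress message arrives as an already-parsed dict rather than a JSON string; the image tag and socket path are illustrative assumptions:

```python
from io import BytesIO

import docker

# Build context is just an in-memory Dockerfile; passing fileobj without
# custom_context means the client wraps it in a tar archive for us.
dockerfile = b'FROM busybox\nCMD ["echo", "hello"]\n'

cli = docker.APIClient(base_url='unix://var/run/docker.sock')  # illustrative
for chunk in cli.build(fileobj=BytesIO(dockerfile),
                       tag='example/hello', rm=True, decode=True):
    if 'stream' in chunk:              # progress lines carry a 'stream' key
        print(chunk['stream'], end='')
```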
diff --git a/docker/api/client.py b/docker/api/client.py
new file mode 100644
index 0000000..91da1c8
--- /dev/null
+++ b/docker/api/client.py
@@ -0,0 +1,460 @@
+import json
+import struct
+from functools import partial
+
+import requests
+import requests.exceptions
+import six
+import websocket
+
+from .build import BuildApiMixin
+from .config import ConfigApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .network import NetworkApiMixin
+from .plugin import PluginApiMixin
+from .secret import SecretApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
+from .. import auth
+from ..constants import (
+ DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
+ MINIMUM_DOCKER_API_VERSION
+)
+from ..errors import (
+ DockerException, InvalidVersion, TLSParameterError,
+ create_api_error_from_http_exception
+)
+from ..tls import TLSConfig
+from ..transport import SSLAdapter, UnixAdapter
+from ..utils import utils, check_resource, update_headers, config
+from ..utils.socket import frames_iter, socket_raw_iter
+from ..utils.json_stream import json_stream
+try:
+ from ..transport import NpipeAdapter
+except ImportError:
+ pass
+
+
+class APIClient(
+ requests.Session,
+ BuildApiMixin,
+ ConfigApiMixin,
+ ContainerApiMixin,
+ DaemonApiMixin,
+ ExecApiMixin,
+ ImageApiMixin,
+ NetworkApiMixin,
+ PluginApiMixin,
+ SecretApiMixin,
+ ServiceApiMixin,
+ SwarmApiMixin,
+ VolumeApiMixin):
+ """
+ A low-level client for the Docker Engine API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.33',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
+ u'GitCommit': u'f4ffd2511c',
+ u'GoVersion': u'go1.9.2',
+ u'KernelVersion': u'4.14.3-1-ARCH',
+ u'MinAPIVersion': u'1.12',
+ u'Os': u'linux',
+ u'Version': u'17.10.0-ce'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.30``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
+ """
+
+ __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
+ '_general_configs',
+ '_version',
+ 'base_url',
+ 'timeout']
+
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS,
+ credstore_env=None):
+ super(APIClient, self).__init__()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._general_configs = config.load_general_config()
+ self._auth_configs = auth.load_config(
+ config_dict=self._general_configs
+ )
+ self.credstore_env = credstore_env
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ # host part of URL should be unused, but is resolved by requests
+ # module in proxy_bypass_macosx_sysconf()
+ self.base_url = 'http+docker://localhost'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ try:
+ self._custom_adapter = NpipeAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ except NameError:
+ raise DockerException(
+ 'Install pypiwin32 package to enable npipe:// support'
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None:
+ self._version = DEFAULT_DOCKER_API_VERSION
+ elif isinstance(version, six.string_types):
+ if version.lower() == 'auto':
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
+ else:
+ raise DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ raise InvalidVersion(
+ 'API versions below {} are no longer supported by this '
+ 'library.'.format(MINIMUM_DOCKER_API_VERSION)
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError:
+ raise DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ )
+ except Exception as e:
+ raise DockerException(
+ 'Error while fetching server API version: {0}'.format(e)
+ )
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, six.string_types):
+ raise ValueError(
+ 'Expected a string but found {0} ({1}) '
+ 'instead'.format(arg, type(arg))
+ )
+
+ quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+ args = map(quote_f, args)
+
+ if kwargs.get('versioned_api', True):
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(*args)
+ )
+ else:
+ return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise create_api_error_from_http_exception(e)
+
+ def _result(self, response, json=False, binary=False):
+ assert not (json and binary)
+ self._raise_for_status(response)
+
+ if json:
+ return response.json()
+ if binary:
+ return response.content
+ return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None and isinstance(data, dict):
+ for k, v in six.iteritems(data):
+ if v is not None:
+ data2[k] = v
+ elif data is not None:
+ data2 = data
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ @check_resource('container')
+ def _attach_websocket(self, container, params=None):
+ url = self._url("/containers/{0}/attach/ws", container)
+ req = requests.Request("POST", url, params=self._attach_params(params))
+ full_url = req.prepare().url
+ full_url = full_url.replace("http://", "ws://", 1)
+ full_url = full_url.replace("https://", "wss://", 1)
+ return self._create_websocket_connection(full_url)
+
+ def _create_websocket_connection(self, url):
+ return websocket.create_connection(url)
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif six.PY3:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ else:
+ sock = response.raw._fp.fp._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+
+ if response.raw._fp.chunked:
+ if decode:
+ for chunk in json_stream(self._stream_helper(response, False)):
+ yield chunk
+ else:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ buf_length = len(buf)
+ walker = 0
+ while True:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
+ break
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ _, length = struct.unpack_from('>BxxxL', header)
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ _, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result(self, response, chunk_size=1, decode=True):
+ ''' Stream result for TTY-enabled container and raw binary data'''
+ self._raise_for_status(response)
+ for out in response.iter_content(chunk_size, decode):
+ yield out
+
+ def _read_from_socket(self, response, stream, tty=False):
+ socket = self._get_raw_response_socket(response)
+
+ gen = None
+ if tty is False:
+ gen = frames_iter(socket)
+ else:
+ gen = socket_raw_iter(socket)
+
+ if stream:
+ return gen
+ else:
+ return six.binary_type().join(gen)
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ @check_resource('container')
+ def _check_is_tty(self, container):
+ cont = self.inspect_container(container)
+ return cont['Config']['Tty']
+
+ def _get_result(self, container, stream, res):
+ return self._get_result_tty(stream, res, self._check_is_tty(container))
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = six.binary_type()
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super(APIClient, self).get_adapter(url)
+ except requests.exceptions.InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
+
+ def reload_config(self, dockercfg_path=None):
+ """
+ Force a reload of the auth configuration
+
+ Args:
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ None
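+
+ Example:
+
+ A minimal sketch; the path is illustrative and must point to an
+ existing config file:
+
+ >>> client.reload_config('/home/user/.docker/config.json')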
+ """
+ self._auth_configs = auth.load_config(dockercfg_path)
diff --git a/docker/api/config.py b/docker/api/config.py
new file mode 100644
index 0000000..767bef2
--- /dev/null
+++ b/docker/api/config.py
@@ -0,0 +1,91 @@
+import base64
+
+import six
+
+from .. import utils
+
+
+class ConfigApiMixin(object):
+ @utils.minimum_version('1.30')
+ def create_config(self, name, data, labels=None):
+ """
+ Create a config
+
+ Args:
+ name (string): Name of the config
+ data (bytes): Config data to be stored
+ labels (dict): A mapping of labels to assign to the config
+
+ Returns (dict): ID of the newly created config
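+
+ Example:
+
+ A hedged sketch; the config name and payload are illustrative:
+
+ >>> client.create_config('server-config', b'port=8080')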
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ url = self._url('/configs/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.30')
+ @utils.check_resource('id')
+ def inspect_config(self, id):
+ """
+ Retrieve config metadata
+
+ Args:
+ id (string): Full ID of the config to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.30')
+ @utils.check_resource('id')
+ def remove_config(self, id):
+ """
+ Remove a config
+
+ Args:
+ id (string): Full ID of the config to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no config with that ID exists
+ """
+ url = self._url('/configs/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.30')
+ def configs(self, filters=None):
+ """
+ List configs
+
+ Args:
+ filters (dict): A map of filters to process on the configs
+ list. Available filters: ``names``
+
+ Returns (list): A list of configs
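+
+ Example:
+
+ A sketch of filtering by name; the config name is illustrative:
+
+ >>> client.configs(filters={'names': ['server-config']})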
+ """
+ url = self._url('/configs')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/docker/api/container.py b/docker/api/container.py
new file mode 100644
index 0000000..d4f75f5
--- /dev/null
+++ b/docker/api/container.py
@@ -0,0 +1,1254 @@
+import six
+from datetime import datetime
+
+from .. import errors
+from .. import utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..types import (
+ CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
+ NetworkingConfig
+)
+
+
+class ContainerApiMixin(object):
+ @utils.check_resource('container')
+ def attach(self, container, stdout=True, stderr=True,
+ stream=False, logs=False):
+ """
+ Attach to a container.
+
+ The ``.logs()`` function is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ container (str): The container to attach to.
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {
+ 'logs': logs and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'stderr': stderr and 1 or 0,
+ 'stream': stream and 1 or 0
+ }
+
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ u = self._url("/containers/{0}/attach", container)
+ response = self._post(u, headers=headers, params=params, stream=True)
+
+ output = self._read_from_socket(
+ response, stream, self._check_is_tty(container)
+ )
+
+ if stream:
+ return CancellableStream(output, response)
+ else:
+ return output
+
+ @utils.check_resource('container')
+ def attach_socket(self, container, params=None, ws=False):
+ """
+ Like ``attach``, but returns the underlying socket-like object for the
+ HTTP request.
+
+ Args:
+ container (str): The container to attach to.
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ For ``detachKeys``, ~/.docker/config.json is used by default.
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if params is None:
+ params = {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ if 'detachKeys' not in params \
+ and 'detachKeys' in self._general_configs:
+
+ params['detachKeys'] = self._general_configs['detachKeys']
+
+ if ws:
+ return self._attach_websocket(container, params)
+
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ u = self._url("/containers/{0}/attach", container)
+ return self._get_raw_response_socket(
+ self.post(
+ u, None, params=self._attach_params(params), stream=True,
+ headers=headers
+ )
+ )
+
+ @utils.check_resource('container')
+ def commit(self, container, repository=None, tag=None, message=None,
+ author=None, changes=None, conf=None):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ container (str): The container to commit
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {
+ 'container': container,
+ 'repo': repository,
+ 'tag': tag,
+ 'comment': message,
+ 'author': author,
+ 'changes': changes
+ }
+ u = self._url("/commit")
+ return self._result(
+ self._post_json(u, data=conf, params=params), json=True
+ )
+
+ def containers(self, quiet=False, all=False, trunc=False, latest=False,
+ since=None, before=None, limit=-1, size=False,
+ filters=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ quiet (bool): Only display numeric Ids
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ trunc (bool): Truncate output
+ latest (bool): Show only the latest created container, including
+ non-running ones.
+ since (str): Show only containers created since Id or Name, including
+ non-running ones
+ before (str): Show only containers created before Id or Name,
+ including non-running ones
+ limit (int): Show `limit` last created containers, including
+ non-running ones
+ size (bool): Display sizes
+ filters (dict): Filters to be processed on the container list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ A list of dicts, one per container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
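+
+ Example:
+
+ A sketch of listing only running containers; the filter value is
+ illustrative:
+
+ >>> cli.containers(filters={'status': 'running'})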
+ """
+ params = {
+ 'limit': 1 if latest else limit,
+ 'all': 1 if all else 0,
+ 'size': 1 if size else 0,
+ 'trunc_cmd': 1 if trunc else 0,
+ 'since': since,
+ 'before': before
+ }
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ u = self._url("/containers/json")
+ res = self._result(self._get(u, params=params), True)
+
+ if quiet:
+ return [{'Id': x['Id']} for x in res]
+ if trunc:
+ for x in res:
+ x['Id'] = x['Id'][:12]
+ return res
+
+ def create_container(self, image, command=None, hostname=None, user=None,
+ detach=False, stdin_open=False, tty=False, ports=None,
+ environment=None, volumes=None,
+ network_disabled=False, name=None, entrypoint=None,
+ working_dir=None, domainname=None, host_config=None,
+ mac_address=None, labels=None, stop_signal=None,
+ networking_config=None, healthcheck=None,
+ stop_timeout=None, runtime=None):
+ """
+ Creates a container. Parameters are similar to those for the ``docker
+ run`` command except it doesn't support the attach options (``-a``).
+
+ The arguments that are passed directly to this function are
+ host-independent configuration options. Host-specific configuration
+ is passed with the `host_config` argument. You'll normally want to
+ use this method in combination with the :py:meth:`create_host_config`
+ method to generate ``host_config``.
+
+ **Port bindings**
+
+ Port binding is done in two parts: first, provide a list of ports to
+ open inside the container with the ``ports`` parameter, then declare
+ bindings with the ``host_config`` parameter. For example:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[1111, 2222],
+ host_config=cli.create_host_config(port_bindings={
+ 1111: 4567,
+ 2222: None
+ })
+ )
+
+
+ You can limit the host address on which the port will be exposed like
+ such:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+
+ Or without host port assignment:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+
+ If you wish to use UDP instead of TCP (default), you need to declare
+ ports as such in both the config and host config:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
+ host_config=cli.create_host_config(port_bindings={
+ '1111/udp': 4567, 2222: None
+ })
+ )
+
+ To bind multiple host ports to a single container port, use the
+ following syntax:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [1234, 4567]
+ })
+
+ You can also bind multiple IPs to a single container port:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [
+ ('192.168.0.100', 1234),
+ ('192.168.0.101', 1234)
+ ]
+ })
+
+ **Using volumes**
+
+ Volume declaration is done in two parts. Provide a list of
+ paths to use as mountpoints inside the container with the
+ ``volumes`` parameter, and declare mappings from paths on the host
+ in the ``host_config`` section.
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds={
+ '/home/user1/': {
+ 'bind': '/mnt/vol2',
+ 'mode': 'rw',
+ },
+ '/var/www': {
+ 'bind': '/mnt/vol1',
+ 'mode': 'ro',
+ }
+ })
+ )
+
+ You can alternatively specify binds as a list. This code is equivalent
+ to the example above:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ ])
+ )
+
+ **Networking**
+
+ You can specify networks to connect the container to by using the
+ ``networking_config`` parameter. At the time of creation, you can
+ only connect a container to a single network, but you
+ can create more connections by using
+ :py:meth:`~connect_container_to_network`.
+
+ For example:
+
+ .. code-block:: python
+
+ networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config(
+ ipv4_address='172.28.0.124',
+ aliases=['foo', 'bar'],
+ links=['container2']
+ )
+ })
+
+ ctnr = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ Args:
+ image (str): The image to run
+ command (str or list): The command to be run in the container
+ hostname (str): Optional hostname for the container
+ user (str or int): Username or UID
+ detach (bool): Detached mode: run container in the background and
+ return container ID
+ stdin_open (bool): Keep STDIN open even if not attached
+ tty (bool): Allocate a pseudo-TTY
+ ports (list of ints): A list of port numbers
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ volumes (str or list): List of paths inside the container to use
+ as volumes.
+ network_disabled (bool): Disable networking
+ name (str): A name for the container
+ entrypoint (str or list): An entrypoint
+ working_dir (str): Path to the working directory
+ domainname (str): The domain name to use for the container
+ host_config (dict): A dictionary created with
+ :py:meth:`create_host_config`.
+ mac_address (str): The Mac Address to assign the container
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ stop_timeout (int): Timeout to stop the container, in seconds.
+ Default: 10
+ networking_config (dict): A networking configuration generated
+ by :py:meth:`create_networking_config`.
+ runtime (str): Runtime to use with this container.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+
+ Returns:
+ A dictionary with the created container's ``Id`` key and a
+ ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
+ config = self.create_container_config(
+ image, command, hostname, user, detach, stdin_open, tty,
+ ports, environment, volumes,
+ network_disabled, entrypoint, working_dir, domainname,
+ host_config, mac_address, labels,
+ stop_signal, networking_config, healthcheck,
+ stop_timeout, runtime
+ )
+ return self.create_container_from_config(config, name)
+
+ def create_container_config(self, *args, **kwargs):
+ return ContainerConfig(self._version, *args, **kwargs)
+
+ def create_container_from_config(self, config, name=None):
+ u = self._url("/containers/create")
+ params = {
+ 'name': name
+ }
+ res = self._post_json(u, data=config, params=params)
+ return self._result(res, True)
+
+ def create_host_config(self, *args, **kwargs):
+ """
+ Create a dictionary for the ``host_config`` argument to
+ :py:meth:`create_container`.
+
+ Args:
+ auto_remove (bool): enable auto-removal of the container on daemon
+ side when the container's process exits.
+ binds (dict): Volumes to bind. See :py:meth:`create_container`
+ for more information.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file
+ dns_search (:py:class:`list`): DNS search domains.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ lxc_conf (dict): LXC config.
+ mem_limit (float or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts a number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``binds``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container
+ on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ port_bindings (dict): See :py:meth:`create_container`
+ for more information.
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure`` or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of dicts.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ runtime (str): Runtime to use with this container.
+
+
+ Returns:
+ (dict) A dictionary which can be passed to the ``host_config``
+ argument to :py:meth:`create_container`.
+
+ Example:
+
+ >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ volumes_from=['nostalgic_newton'])
+ {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
+ 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
+
+"""
+ if not kwargs:
+ kwargs = {}
+ if 'version' in kwargs:
+ raise TypeError(
+ "create_host_config() got an unexpected "
+ "keyword argument 'version'"
+ )
+ kwargs['version'] = self._version
+ return HostConfig(*args, **kwargs)
+
+ def create_networking_config(self, *args, **kwargs):
+ """
+ Create a networking config dictionary to be used as the
+ ``networking_config`` parameter in :py:meth:`create_container`.
+
+ Args:
+ endpoints_config (dict): A dictionary mapping network names to
+ endpoint configurations generated by
+ :py:meth:`create_endpoint_config`.
+
+ Returns:
+ (dict) A networking config.
+
+ Example:
+
+ >>> docker_client.create_network('network1')
+ >>> networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config()
+ })
+ >>> container = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ """
+ return NetworkingConfig(*args, **kwargs)
+
+ def create_endpoint_config(self, *args, **kwargs):
+ """
+ Create an endpoint config dictionary to be used with
+ :py:meth:`create_networking_config`.
+
+ Args:
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Returns:
+ (dict) An endpoint config.
+
+ Example:
+
+ >>> endpoint_config = client.create_endpoint_config(
+ aliases=['web', 'app'],
+ links=['app_db'],
+ ipv4_address='132.65.0.123'
+ )
+
+ """
+ return EndpointConfig(self._version, *args, **kwargs)
+
+ @utils.check_resource('container')
+ def diff(self, container):
+ """
+ Inspect changes on a container's filesystem.
+
+ Args:
+ container (str): The container to diff
+
+ Returns:
+ (str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(
+ self._get(self._url("/containers/{0}/changes", container)), True
+ )
+
+ @utils.check_resource('container')
+ def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Export the contents of a filesystem as a tar archive.
+
+ Args:
+ container (str): The container to export
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): The archived filesystem data stream
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ res = self._get(
+ self._url("/containers/{0}/export", container), stream=True
+ )
+ return self._stream_raw_result(res, chunk_size, False)
+
+ @utils.check_resource('container')
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Retrieve a file or folder from a container in the form of a tar
+ archive.
+
+ Args:
+ container (str): The container where the file is located
+ path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
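+
+ Example:
+
+ A sketch of saving the tar stream to disk; the container name and
+ paths are illustrative:
+
+ >>> strm, stat = cli.get_archive('mycontainer', '/etc/hostname')
+ >>> with open('hostname.tar', 'wb') as f:
+ ...     for chunk in strm:
+ ...         f.write(chunk)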
+ """
+ params = {
+ 'path': path
+ }
+ url = self._url('/containers/{0}/archive', container)
+ res = self._get(url, params=params, stream=True)
+ self._raise_for_status(res)
+ encoded_stat = res.headers.get('x-docker-container-path-stat')
+ return (
+ self._stream_raw_result(res, chunk_size, False),
+ utils.decode_json_header(encoded_stat) if encoded_stat else None
+ )
+
+ @utils.check_resource('container')
+ def inspect_container(self, container):
+ """
+ Identical to the `docker inspect` command, but only for containers.
+
+ Args:
+ container (str): The container to inspect
+
+ Returns:
+ (dict): Similar to the output of `docker inspect`, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(
+ self._get(self._url("/containers/{0}/json", container)), True
+ )
+
+ @utils.check_resource('container')
+ def kill(self, container, signal=None):
+ """
+ Kill a container or send a signal to a container.
+
+ Args:
+ container (str): The container to kill
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/containers/{0}/kill", container)
+ params = {}
+ if signal is not None:
+ if not isinstance(signal, six.string_types):
+ signal = int(signal)
+ params['signal'] = signal
+ res = self._post(url, params=params)
+
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def logs(self, container, stdout=True, stderr=True, stream=False,
+ timestamps=False, tail='all', since=None, follow=None,
+ until=None):
+ """
+ Get logs from a container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ container (str): The container to get logs from
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output specified number of lines at the end of
+ logs. Either an integer of number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+ until (datetime or int): Show logs that occurred before the given
+ datetime or integer epoch (in seconds)
+
+ Returns:
+ (generator or str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
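+
+ Example:
+
+ A sketch of streaming logs as they arrive; the container name is
+ illustrative:
+
+ >>> for line in cli.logs('mycontainer', stream=True, follow=True):
+ ...     print(line.decode('utf-8').strip())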
+ """
+ if follow is None:
+ follow = stream
+ params = {'stderr': stderr and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'timestamps': timestamps and 1 or 0,
+ 'follow': follow and 1 or 0,
+ }
+ if tail != 'all' and (not isinstance(tail, int) or tail < 0):
+ tail = 'all'
+ params['tail'] = tail
+
+ if since is not None:
+ if isinstance(since, datetime):
+ params['since'] = utils.datetime_to_timestamp(since)
+ elif (isinstance(since, int) and since > 0):
+ params['since'] = since
+ else:
+ raise errors.InvalidArgument(
+ 'since value should be datetime or positive int, '
+ 'not {}'.format(type(since))
+ )
+
+ if until is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'until is not supported for API version < 1.35'
+ )
+ if isinstance(until, datetime):
+ params['until'] = utils.datetime_to_timestamp(until)
+ elif (isinstance(until, int) and until > 0):
+ params['until'] = until
+ else:
+ raise errors.InvalidArgument(
+ 'until value should be datetime or positive int, '
+ 'not {}'.format(type(until))
+ )
+
+ url = self._url("/containers/{0}/logs", container)
+ res = self._get(url, params=params, stream=stream)
+ output = self._get_result(container, stream, res)
+
+ if stream:
+ return CancellableStream(output, res)
+ else:
+ return output
+
+ @utils.check_resource('container')
+ def pause(self, container):
+ """
+ Pauses all processes within a container.
+
+ Args:
+ container (str): The container to pause
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/containers/{0}/pause', container)
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def port(self, container, private_port):
+ """
+ Lookup the public-facing port that is NAT-ed to ``private_port``.
+ Identical to the ``docker port`` command.
+
+ Args:
+ container (str): The container to look up
+ private_port (int): The private port to inspect
+
+ Returns:
+ (list of dict): The mapping for the host ports
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ .. code-block:: bash
+
+ $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
+ 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
+
+ .. code-block:: python
+
+ >>> cli.port('7174d6347063', 80)
+ [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
+ """
+ res = self._get(self._url("/containers/{0}/json", container))
+ self._raise_for_status(res)
+ json_ = res.json()
+ private_port = str(private_port)
+ h_ports = None
+
+ # Port settings is None when the container is running with
+ # network_mode=host.
+ port_settings = json_.get('NetworkSettings', {}).get('Ports')
+ if port_settings is None:
+ return None
+
+ if '/' in private_port:
+ return port_settings.get(private_port)
+
+ h_ports = port_settings.get(private_port + '/tcp')
+ if h_ports is None:
+ h_ports = port_settings.get(private_port + '/udp')
+
+ return h_ports
+
+ @utils.check_resource('container')
+ def put_archive(self, container, path, data):
+ """
+ Insert a file or folder in an existing container using a tar archive as
+ source.
+
+ Args:
+ container (str): The container where the file(s) will be extracted
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
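+
+ Example:
+
+ A hedged sketch pairing ``put_archive`` with an in-memory tar
+ archive; the container name and paths are illustrative, and
+ ``hello.txt`` is assumed to exist locally:
+
+ >>> import io, tarfile
+ >>> buf = io.BytesIO()
+ >>> with tarfile.open(fileobj=buf, mode='w') as t:
+ ...     t.add('hello.txt')
+ >>> cli.put_archive('mycontainer', '/tmp', buf.getvalue())
+ True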
+ """
+ params = {'path': path}
+ url = self._url('/containers/{0}/archive', container)
+ res = self._put(url, params=params, data=data)
+ self._raise_for_status(res)
+ return res.status_code == 200
+
+ @utils.minimum_version('1.25')
+ def prune_containers(self, filters=None):
+ """
+ Delete stopped containers
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted container IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/containers/prune')
+ return self._result(self._post(url, params=params), True)
+
+ @utils.check_resource('container')
+ def remove_container(self, container, v=False, link=False, force=False):
+ """
+ Remove a container. Similar to the ``docker rm`` command.
+
+ Args:
+ container (str): The container to remove
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {'v': v, 'link': link, 'force': force}
+ res = self._delete(
+ self._url("/containers/{0}", container), params=params
+ )
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def rename(self, container, name):
+ """
+ Rename a container. Similar to the ``docker rename`` command.
+
+ Args:
+ container (str): ID of the container to rename
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/containers/{0}/rename", container)
+ params = {'name': name}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def resize(self, container, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ container (str or dict): The container to resize
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {'h': height, 'w': width}
+ url = self._url("/containers/{0}/resize", container)
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def restart(self, container, timeout=10):
+ """
+ Restart a container. Similar to the ``docker restart`` command.
+
+ Args:
+ container (str or dict): The container to restart. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {'t': timeout}
+ url = self._url("/containers/{0}/restart", container)
+ conn_timeout = self.timeout
+ if conn_timeout is not None:
+ conn_timeout += timeout
+ res = self._post(url, params=params, timeout=conn_timeout)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def start(self, container, *args, **kwargs):
+ """
+ Start a container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ **Deprecation warning:** Passing configuration options in ``start`` is
+ no longer supported. Users are expected to provide host config options
+ in the ``host_config`` parameter of
+ :py:meth:`~ContainerApiMixin.create_container`.
+
+
+ Args:
+ container (str): The container to start
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.DeprecatedMethod`
+ If any argument besides ``container`` are provided.
+
+ Example:
+
+ >>> container = cli.create_container(
+ ... image='busybox:latest',
+ ... command='/bin/sleep 30')
+ >>> cli.start(container=container.get('Id'))
+ """
+ if args or kwargs:
+ raise errors.DeprecatedMethod(
+ 'Providing configuration in the start() method is no longer '
+ 'supported. Use the host_config param in create_container '
+ 'instead.'
+ )
+ url = self._url("/containers/{0}/start", container)
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def stats(self, container, decode=None, stream=True):
+ """
+ Stream statistics for a specific container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ container (str): The container to stream statistics from
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
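+ Example:
+
+ A sketch of a one-shot (non-streaming) stats call; the container
+ name is illustrative:
+
+ >>> cli.stats('mycontainer', stream=False)
+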
+ """
+ url = self._url("/containers/{0}/stats", container)
+ if stream:
+ return self._stream_helper(self._get(url, stream=True),
+ decode=decode)
+ else:
+ return self._result(self._get(url, params={'stream': False}),
+ json=True)
+
+ @utils.check_resource('container')
+ def stop(self, container, timeout=None):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ container (str): The container to stop
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. If None, then the
+ StopTimeout value of the container will be used.
+ Default: None
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if timeout is None:
+ params = {}
+ timeout = 10
+ else:
+ params = {'t': timeout}
+ url = self._url("/containers/{0}/stop", container)
+ conn_timeout = self.timeout
+ if conn_timeout is not None:
+ conn_timeout += timeout
+ res = self._post(url, params=params, timeout=conn_timeout)
+ self._raise_for_status(res)
+
+ @utils.check_resource('container')
+ def top(self, container, ps_args=None):
+ """
+ Display the running processes of a container.
+
+ Args:
+ container (str): The container to inspect
+ ps_args (str): Optional arguments passed to ``ps`` (e.g. ``aux``)
+
+ Returns:
+ (dict): The output of ``top``, with ``Titles`` and ``Processes``
+ keys
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ u = self._url("/containers/{0}/top", container)
+ params = {}
+ if ps_args is not None:
+ params['ps_args'] = ps_args
+ return self._result(self._get(u, params=params), True)
+
+ @utils.check_resource('container')
+ def unpause(self, container):
+ """
+ Unpause all processes within a container.
+
+ Args:
+ container (str): The container to unpause
+ """
+ url = self._url('/containers/{0}/unpause', container)
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @utils.minimum_version('1.22')
+ @utils.check_resource('container')
+ def update_container(
+ self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
+ cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
+ mem_reservation=None, memswap_limit=None, kernel_memory=None,
+ restart_policy=None
+ ):
+ """
+ Update a container's resource configuration.
+
+ Args:
+ container (str): The container to update
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
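+
+ Example:
+
+ A sketch of raising a container's memory limit; the name and
+ value are illustrative:
+
+ >>> cli.update_container('mycontainer', mem_limit='1g')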
+ """
+ url = self._url('/containers/{0}/update', container)
+ data = {}
+ if blkio_weight:
+ data['BlkioWeight'] = blkio_weight
+ if cpu_period:
+ data['CpuPeriod'] = cpu_period
+ if cpu_shares:
+ data['CpuShares'] = cpu_shares
+ if cpu_quota:
+ data['CpuQuota'] = cpu_quota
+ if cpuset_cpus:
+ data['CpusetCpus'] = cpuset_cpus
+ if cpuset_mems:
+ data['CpusetMems'] = cpuset_mems
+ if mem_limit:
+ data['Memory'] = utils.parse_bytes(mem_limit)
+ if mem_reservation:
+ data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
+ if memswap_limit:
+ data['MemorySwap'] = utils.parse_bytes(memswap_limit)
+ if kernel_memory:
+ data['KernelMemory'] = utils.parse_bytes(kernel_memory)
+ if restart_policy:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'restart policy update is not supported '
+ 'for API version < 1.23'
+ )
+ data['RestartPolicy'] = restart_policy
+
+ res = self._post_json(url, data=data)
+ return self._result(res, True)
+
+ @utils.check_resource('container')
+ def wait(self, container, timeout=None, condition=None):
+ """
+ Block until a container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ container (str or dict): The container to wait on. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
+
+ Returns:
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
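+
+ Example:
+
+ A sketch of waiting for a container to stop; the container name
+ and exit code are illustrative:
+
+ >>> result = cli.wait('mycontainer')
+ >>> result['StatusCode']
+ 0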
+ """
+ url = self._url("/containers/{0}/wait", container)
+ params = {}
+ if condition is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'wait condition is not supported for API version < 1.30'
+ )
+ params['condition'] = condition
+
+ res = self._post(url, timeout=timeout, params=params)
+ return self._result(res, True)
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
new file mode 100644
index 0000000..76a94cf
--- /dev/null
+++ b/docker/api/daemon.py
@@ -0,0 +1,181 @@
+import os
+from datetime import datetime
+
+from .. import auth, types, utils
+
+
+class DaemonApiMixin(object):
+ @utils.minimum_version('1.25')
+ def df(self):
+ """
+ Get data usage information.
+
+ Returns:
+ (dict): A dictionary representing different resource categories
+ and their respective data usage.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/system/df')
+ return self._result(self._get(url), True)
+
+ def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ A :py:class:`docker.types.daemon.CancellableStream` generator
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for event in client.events():
+ ...     print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+
+ or
+
+ >>> events = client.events()
+ >>> for event in events:
+ ...     print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ if isinstance(since, datetime):
+ since = utils.datetime_to_timestamp(since)
+
+ if isinstance(until, datetime):
+ until = utils.datetime_to_timestamp(until)
+
+ if filters:
+ filters = utils.convert_filters(filters)
+
+ params = {
+ 'since': since,
+ 'until': until,
+ 'filters': filters
+ }
+ url = self._url('/events')
+
+ response = self._get(url, params=params, stream=True, timeout=None)
+ stream = self._stream_helper(response, decode=decode)
+
+ return types.CancellableStream(stream, response)
+
+ def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url("/info")), True)
+
+ def login(self, username, password=None, email=None, registry=None,
+ reauth=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+ reauth (bool): Whether or not to refresh existing authentication on
+ the Docker server.
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
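+
+ Example:
+
+ A hedged sketch; the credentials and registry URL are
+ placeholders:
+
+ >>> client.login(username='user', password='secret',
+ ...              registry='https://index.docker.io/v1/')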
+ """
+
+ # If dockercfg_path is passed, check whether that config file exists
+ # and, if so, load it. Otherwise, if we don't have any auth data yet,
+ # try reloading the default config file one more time in case anything
+ # showed up in there.
+ if dockercfg_path and os.path.exists(dockercfg_path):
+ self._auth_configs = auth.load_config(dockercfg_path)
+ elif not self._auth_configs:
+ self._auth_configs = auth.load_config()
+
+ authcfg = auth.resolve_authconfig(
+ self._auth_configs, registry, credstore_env=self.credstore_env,
+ )
+ # If we found an existing auth config for this registry and username
+ # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == username \
+ and not reauth:
+ return authcfg
+
+ req_data = {
+ 'username': username,
+ 'password': password,
+ 'email': email,
+ 'serveraddress': registry,
+ }
+
+ response = self._post_json(self._url('/auth'), data=req_data)
+ if response.status_code == 200:
+ if 'auths' not in self._auth_configs:
+ self._auth_configs['auths'] = {}
+ self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
+ return self._result(response, json=True)
+
+ def ping(self):
+ """
+ Checks that the server is responsive. An exception is raised if it
+ isn't responding.
+
+ Returns:
+ (bool) The response from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
+
+ def version(self, api_version=True):
+ """
+ Returns version information from the server. Similar to the ``docker
+ version`` command.
+
+ Returns:
+ (dict): The server version information
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/version", versioned_api=api_version)
+ return self._result(self._get(url), json=True)
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
new file mode 100644
index 0000000..986d87f
--- /dev/null
+++ b/docker/api/exec_api.py
@@ -0,0 +1,165 @@
+import six
+
+from .. import errors
+from .. import utils
+
+
+class ExecApiMixin(object):
+ @utils.check_resource('container')
+ def exec_create(self, container, cmd, stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=False, user='',
+ environment=None, workdir=None, detach_keys=None):
+ """
+ Sets up an exec instance in a running container.
+
+ Args:
+ container (str): Target container where exec instance will be
+ created
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+ detach_keys (str): Override the key sequence for detaching
+ a container. Format is a single character `[a-Z]`
+ or `ctrl-<value>` where `<value>` is one of:
+ `a-z`, `@`, `^`, `[`, `,` or `_`.
+ ~/.docker/config.json is used by default.
+
+ Returns:
+ (dict): A dictionary with an exec ``Id`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ if environment is not None and utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Setting environment for exec is not supported in API < 1.25'
+ )
+
+ if isinstance(cmd, six.string_types):
+ cmd = utils.split_command(cmd)
+
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
+ data = {
+ 'Container': container,
+ 'User': user,
+ 'Privileged': privileged,
+ 'Tty': tty,
+ 'AttachStdin': stdin,
+ 'AttachStdout': stdout,
+ 'AttachStderr': stderr,
+ 'Cmd': cmd,
+ 'Env': environment,
+ }
+
+ if workdir is not None:
+ if utils.version_lt(self._version, '1.35'):
+ raise errors.InvalidVersion(
+ 'workdir is not supported for API version < 1.35'
+ )
+ data['WorkingDir'] = workdir
+
+ if detach_keys:
+ data['detachKeys'] = detach_keys
+ elif 'detachKeys' in self._general_configs:
+ data['detachKeys'] = self._general_configs['detachKeys']
+
+ url = self._url('/containers/{0}/exec', container)
+ res = self._post_json(url, data=data)
+ return self._result(res, True)
+
+ def exec_inspect(self, exec_id):
+ """
+ Return low-level information about an exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+
+ Returns:
+ (dict): Dictionary of values returned by the endpoint.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+ res = self._get(self._url("/exec/{0}/json", exec_id))
+ return self._result(res, True)
+
+ def exec_resize(self, exec_id, height=None, width=None):
+ """
+ Resize the tty session used by the specified exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ height (int): Height of tty session
+ width (int): Width of tty session
+ """
+
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+
+ params = {'h': height, 'w': width}
+ url = self._url("/exec/{0}/resize", exec_id)
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @utils.check_resource('exec_id')
+ def exec_start(self, exec_id, detach=False, tty=False, stream=False,
+ socket=False):
+ """
+ Start a previously set up exec instance.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ stream (bool): Stream response data. Default: False
+ socket (bool): Return the connection socket to allow custom
+ read/write operations.
+
+ Returns:
+ (generator or str): If ``stream=True``, a generator yielding
+ response chunks. If ``socket=True``, a socket object for the
+ connection. A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
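+
+ Example:
+
+ A minimal create-then-start sketch; the container name and
+ command are illustrative:
+
+ >>> exec_id = cli.exec_create('mycontainer', 'ls /tmp')
+ >>> cli.exec_start(exec_id)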
+ """
+ # we want opened socket if socket == True
+
+ data = {
+ 'Tty': tty,
+ 'Detach': detach
+ }
+
+ headers = {} if detach else {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ res = self._post_json(
+ self._url('/exec/{0}/start', exec_id),
+ headers=headers,
+ data=data,
+ stream=True
+ )
+ if detach:
+ return self._result(res)
+ if socket:
+ return self._get_raw_response_socket(res)
+ return self._read_from_socket(res, stream, tty)
diff --git a/docker/api/image.py b/docker/api/image.py
new file mode 100644
index 0000000..5f05d88
--- /dev/null
+++ b/docker/api/image.py
@@ -0,0 +1,562 @@
+import logging
+import os
+
+import six
+
+from .. import auth, errors, utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+
+log = logging.getLogger(__name__)
+
+
+class ImageApiMixin(object):
+
+ @utils.check_resource('image')
+ def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ image (str): Image name to get
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): A stream of raw archive data.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image:
+ ...     f.write(chunk)
+ >>> f.close()
+ """
+ res = self._get(self._url("/images/{0}/get", image), stream=True)
+ return self._stream_raw_result(res, chunk_size, False)
+
+ @utils.check_resource('image')
+ def history(self, image):
+ """
+ Show the history of an image.
+
+ Args:
+ image (str): The image to show history for
+
+ Returns:
+ (str): The history of the image
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ res = self._get(self._url("/images/{0}/history", image))
+ return self._result(res, True)
+
+ def images(self, name=None, quiet=False, all=False, filters=None):
+ """
+ List images. Similar to the ``docker images`` command.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ quiet (bool): Only return numeric IDs as a list.
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (dict or list): A list if ``quiet=True``, otherwise a dict.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {
+ 'filter': name,
+ 'only_ids': 1 if quiet else 0,
+ 'all': 1 if all else 0,
+ }
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ res = self._result(self._get(self._url("/images/json"), params=params),
+ True)
+ if quiet:
+ return [x['Id'] for x in res]
+ return res
+
+ def import_image(self, src=None, repository=None, tag=None, image=None,
+ changes=None, stream_src=False):
+ """
+ Import an image. Similar to the ``docker import`` command.
+
+ If ``src`` is a string or unicode string, it will first be treated as a
+ path to a tarball on the local system. If there is an error reading
+ from that file, ``src`` will be treated as a URL instead to fetch the
+ image from. You can also pass an open file handle as ``src``, in which
+ case the data will be read from that file.
+
+ If ``src`` is unset but ``image`` is set, the ``image`` parameter will
+ be taken as the name of an existing image to import from.
+
+ Args:
+ src (str or file): Path to tarfile, URL, or file-like object
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ image (str): Use another image like the ``FROM`` Dockerfile
+ parameter
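+
+ Example:
+
+ A sketch of importing a local tarball; the path and repository
+ name are illustrative:
+
+ >>> cli.import_image(
+ ...     src='/tmp/rootfs.tar', repository='myrepo/myimage'
+ ... )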
+ """
+ if not (src or image):
+ raise errors.DockerException(
+ 'Must specify src or image to import from'
+ )
+ u = self._url('/images/create')
+
+ params = _import_image_params(
+ repository, tag, image,
+ src=(src if isinstance(src, six.string_types) else None),
+ changes=changes
+ )
+ headers = {'Content-Type': 'application/tar'}
+
+ if image or params.get('fromSrc') != '-': # from image or URL
+ return self._result(
+ self._post(u, data=None, params=params)
+ )
+ elif isinstance(src, six.string_types): # from file path
+ with open(src, 'rb') as f:
+ return self._result(
+ self._post(
+ u, data=f, params=params, headers=headers, timeout=None
+ )
+ )
+ else: # from raw data
+ if stream_src:
+ headers['Transfer-Encoding'] = 'chunked'
+ return self._result(
+ self._post(u, data=src, params=params, headers=headers)
+ )
+
+ def import_image_from_data(self, data, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
+ allows importing in-memory bytes data.
+
+ Args:
+ data (bytes collection): Bytes collection containing valid tar data
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+
+ u = self._url('/images/create')
+ params = _import_image_params(
+ repository, tag, src='-', changes=changes
+ )
+ headers = {'Content-Type': 'application/tar'}
+ return self._result(
+ self._post(
+ u, data=data, params=params, headers=headers, timeout=None
+ )
+ )
+
+ def import_image_from_file(self, filename, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a tar file on disk.
+
+ Args:
+ filename (str): Full path to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+
+ Raises:
+ IOError: File does not exist.
+ """
+
+ return self.import_image(
+ src=filename, repository=repository, tag=tag, changes=changes
+ )
+
+ def import_image_from_stream(self, stream, repository=None, tag=None,
+ changes=None):
+ return self.import_image(
+ src=stream, stream_src=True, repository=repository, tag=tag,
+ changes=changes
+ )
+
+ def import_image_from_url(self, url, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a URL.
+
+ Args:
+ url (str): A URL pointing to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+ return self.import_image(
+ src=url, repository=repository, tag=tag, changes=changes
+ )
+
+ def import_image_from_image(self, image, repository=None, tag=None,
+ changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from another image, like the ``FROM`` Dockerfile
+ parameter.
+
+ Args:
+ image (str): Image name to import from
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+ return self.import_image(
+ image=image, repository=repository, tag=tag, changes=changes
+ )
+
+ @utils.check_resource('image')
+ def inspect_image(self, image):
+ """
+ Get detailed information about an image. Similar to the ``docker
+ inspect`` command, but only for images.
+
+ Args:
+ image (str): The image to inspect
+
+ Returns:
+ (dict): Similar to the output of ``docker inspect``, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(
+ self._get(self._url("/images/{0}/json", image)), True
+ )
+
+ @utils.minimum_version('1.30')
+ @utils.check_resource('image')
+ def inspect_distribution(self, image):
+ """
+ Get image digest and platform information by contacting the registry.
+
+ Args:
+ image (str): The image name to inspect
+
+ Returns:
+ (dict): A dict containing distribution data
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self._result(
+ self._get(self._url("/distribution/{0}/json", image)), True
+ )
+
+ def load_image(self, data, quiet=None):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
+ save``). Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+ quiet (boolean): Suppress progress details in response.
+
+ Returns:
+ (generator): Progress output as JSON objects. Only available for
+ API version >= 1.23
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
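+
+ Example:
+ A minimal sketch (assumes a connected ``APIClient`` named ``cli``
+ and a tarball previously produced by ``docker save`` or
+ :py:meth:`~docker.api.image.ImageApiMixin.get_image`; the path
+ is hypothetical):
+
+ >>> with open('/tmp/busybox.tar', 'rb') as f:
+ ... for line in cli.load_image(f.read()):
+ ... print(line)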
+ """
+ params = {}
+
+ if quiet is not None:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'quiet is not supported in API version < 1.23'
+ )
+ params['quiet'] = quiet
+
+ res = self._post(
+ self._url("/images/load"), data=data, params=params, stream=True
+ )
+ if utils.version_gte(self._version, '1.23'):
+ return self._stream_helper(res, decode=True)
+
+ self._raise_for_status(res)
+
+ @utils.minimum_version('1.25')
+ def prune_images(self, filters=None):
+ """
+ Delete unused images
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+ Available filters:
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
+
+ Returns:
+ (dict): A dict containing a list of deleted image IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
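+
+ Example:
+ A minimal sketch that prunes only dangling images:
+
+ >>> cli.prune_images(filters={'dangling': True})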
+ """
+ url = self._url("/images/prune")
+ params = {}
+ if filters is not None:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._post(url, params=params), True)
+
+ def pull(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False, platform=None):
+ """
+ Pulls an image. Similar to the ``docker pull`` command.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ stream (bool): Stream the output as a generator
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+
+ Returns:
+ (generator or str): The output
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for line in cli.pull('busybox', stream=True):
+ ... print(json.dumps(json.loads(line), indent=4))
+ {
+ "status": "Pulling image (latest) from busybox",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+ {
+ "status": "Pulling image (latest) from busybox, endpoint: ...",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+
+ """
+ if not tag:
+ repository, tag = utils.parse_repository_tag(repository)
+ registry, repo_name = auth.resolve_repository_name(repository)
+
+ params = {
+ 'tag': tag,
+ 'fromImage': repository
+ }
+ headers = {}
+
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ if platform is not None:
+ if utils.version_lt(self._version, '1.32'):
+ raise errors.InvalidVersion(
+ 'platform was only introduced in API version 1.32'
+ )
+ params['platform'] = platform
+
+ response = self._post(
+ self._url('/images/create'), params=params, headers=headers,
+ stream=stream, timeout=None
+ )
+
+ self._raise_for_status(response)
+
+ if stream:
+ return self._stream_helper(response, decode=decode)
+
+ return self._result(response)
+
+ def push(self, repository, tag=None, stream=False, auth_config=None,
+ decode=False):
+ """
+ Push an image or a repository to the registry. Similar to the ``docker
+ push`` command.
+
+ Args:
+ repository (str): The repository to push to
+ tag (str): An optional tag to push
+ stream (bool): Stream the output as a blocking generator
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+ decode (bool): Decode the JSON data from the server into dicts.
+ Only applies with ``stream=True``
+
+ Returns:
+ (generator or str): The output from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ >>> for line in cli.push('yourname/app', stream=True):
+ ... print(line)
+ {"status":"Pushing repository yourname/app (1 tags)"}
+ {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
+ {"status":"Image already pushed, skipping","progressDetail":{},
+ "id":"511136ea3c5a"}
+ ...
+
+ """
+ if not tag:
+ repository, tag = utils.parse_repository_tag(repository)
+ registry, repo_name = auth.resolve_repository_name(repository)
+ u = self._url("/images/{0}/push", repository)
+ params = {
+ 'tag': tag
+ }
+ headers = {}
+
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ response = self._post_json(
+ u, None, headers=headers, stream=stream, params=params
+ )
+
+ self._raise_for_status(response)
+
+ if stream:
+ return self._stream_helper(response, decode=decode)
+
+ return self._result(response)
+
+ @utils.check_resource('image')
+ def remove_image(self, image, force=False, noprune=False):
+ """
+ Remove an image. Similar to the ``docker rmi`` command.
+
+ Args:
+ image (str): The image to remove
+ force (bool): Force removal of the image
+ noprune (bool): Do not delete untagged parents
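+
+ Example:
+ A minimal sketch (the image name is hypothetical):
+
+ >>> cli.remove_image('myrepo:latest', force=True)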
+ """
+ params = {'force': force, 'noprune': noprune}
+ res = self._delete(self._url("/images/{0}", image), params=params)
+ return self._result(res, True)
+
+ def search(self, term):
+ """
+ Search for images on Docker Hub. Similar to the ``docker search``
+ command.
+
+ Args:
+ term (str): A term to search for.
+
+ Returns:
+ (list of dicts): The response of the search.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
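+
+ Example:
+ A minimal sketch that lists the names of matching images:
+
+ >>> results = cli.search('busybox')
+ >>> [r['name'] for r in results]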
+ """
+ return self._result(
+ self._get(self._url("/images/search"), params={'term': term}),
+ True
+ )
+
+ @utils.check_resource('image')
+ def tag(self, image, repository, tag=None, force=False):
+ """
+ Tag an image into a repository. Similar to the ``docker tag`` command.
+
+ Args:
+ image (str): The image to tag
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force the tag, replacing any existing tag with
+ the same name
+
+ Returns:
+ (bool): ``True`` if successful
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ ... force=True)
+ """
+ params = {
+ 'tag': tag,
+ 'repo': repository,
+ 'force': 1 if force else 0
+ }
+ url = self._url("/images/{0}/tag", image)
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return res.status_code == 201
+
+
+def is_file(src):
+ try:
+ return (
+ isinstance(src, six.string_types) and
+ os.path.isfile(src)
+ )
+ except TypeError: # a data string will make isfile() raise a TypeError
+ return False
+
+
+def _import_image_params(repo, tag, image=None, src=None,
+ changes=None):
+ params = {
+ 'repo': repo,
+ 'tag': tag,
+ }
+ if image:
+ params['fromImage'] = image
+ elif src and not is_file(src):
+ params['fromSrc'] = src
+ else:
+ params['fromSrc'] = '-'
+
+ if changes:
+ params['changes'] = changes
+
+ return params
diff --git a/docker/api/network.py b/docker/api/network.py
new file mode 100644
index 0000000..57ed8d3
--- /dev/null
+++ b/docker/api/network.py
@@ -0,0 +1,272 @@
+from ..errors import InvalidVersion
+from ..utils import check_resource, minimum_version
+from ..utils import version_lt
+from .. import utils
+
+
+class NetworkApiMixin(object):
+ def networks(self, names=None, ids=None, filters=None):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by
+ ids (:py:class:`list`): List of ids to filter by
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+
+ Returns:
+ (list): List of network objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
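+
+ Example:
+ A minimal sketch that lists only bridge networks:
+
+ >>> client.networks(filters={'driver': 'bridge'})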
+ """
+
+ if filters is None:
+ filters = {}
+ if names:
+ filters['name'] = names
+ if ids:
+ filters['id'] = ids
+ params = {'filters': utils.convert_filters(filters)}
+ url = self._url("/networks")
+ res = self._get(url, params=params)
+ return self._result(res, json=True)
+
+ def create_network(self, name, driver=None, options=None, ipam=None,
+ check_duplicate=None, internal=False, labels=None,
+ enable_ipv6=False, attachable=None, scope=None,
+ ingress=None):
+ """
+ Create a network. Similar to the ``docker network create``
+ command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+ check_duplicate (bool): Request daemon to check for networks with
+ same name. Default: ``None``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (dict): The created network reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.create_network("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> docker_client.create_network("network1", driver="bridge",
+ ipam=ipam_config)
+ """
+ if options is not None and not isinstance(options, dict):
+ raise TypeError('options must be a dictionary')
+
+ data = {
+ 'Name': name,
+ 'Driver': driver,
+ 'Options': options,
+ 'IPAM': ipam,
+ 'CheckDuplicate': check_duplicate,
+ }
+
+ if labels is not None:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'network labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ if enable_ipv6:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'enable_ipv6 was introduced in API 1.23'
+ )
+ data['EnableIPv6'] = True
+
+ if internal:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion('Internal networks are not '
+ 'supported in API version < 1.22')
+ data['Internal'] = True
+
+ if attachable is not None:
+ if version_lt(self._version, '1.24'):
+ raise InvalidVersion(
+ 'attachable is not supported in API version < 1.24'
+ )
+ data['Attachable'] = attachable
+
+ if ingress is not None:
+ if version_lt(self._version, '1.29'):
+ raise InvalidVersion(
+ 'ingress is not supported in API version < 1.29'
+ )
+
+ data['Ingress'] = ingress
+
+ if scope is not None:
+ if version_lt(self._version, '1.30'):
+ raise InvalidVersion(
+ 'scope is not supported in API version < 1.30'
+ )
+ data['Scope'] = scope
+
+ url = self._url("/networks/create")
+ res = self._post_json(url, data=data)
+ return self._result(res, json=True)
+
+ @minimum_version('1.25')
+ def prune_networks(self, filters=None):
+ """
+ Delete unused networks
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted network names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/networks/prune')
+ return self._result(self._post(url, params=params), True)
+
+ @check_resource('net_id')
+ def remove_network(self, net_id):
+ """
+ Remove a network. Similar to the ``docker network rm`` command.
+
+ Args:
+ net_id (str): The network's id
+ """
+ url = self._url("/networks/{0}", net_id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+
+ @check_resource('net_id')
+ def inspect_network(self, net_id, verbose=None, scope=None):
+ """
+ Get detailed information about a network.
+
+ Args:
+ net_id (str): ID of network
+ verbose (bool): Show the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
+ """
+ params = {}
+ if verbose is not None:
+ if version_lt(self._version, '1.28'):
+ raise InvalidVersion('verbose was introduced in API 1.28')
+ params['verbose'] = verbose
+ if scope is not None:
+ if version_lt(self._version, '1.31'):
+ raise InvalidVersion('scope was introduced in API 1.31')
+ params['scope'] = scope
+
+ url = self._url("/networks/{0}", net_id)
+ res = self._get(url, params=params)
+ return self._result(res, json=True)
+
+ @check_resource('container')
+ def connect_container_to_network(self, container, net_id,
+ ipv4_address=None, ipv6_address=None,
+ aliases=None, links=None,
+ link_local_ips=None):
+ """
+ Connect a container to a network.
+
+ Args:
+ container (str): container-id/name to be connected to the network
+ net_id (str): network id
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local
+ (IPv4/IPv6) addresses.
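+
+ Example:
+ A minimal sketch (the container and network names are
+ hypothetical):
+
+ >>> client.connect_container_to_network(
+ ... 'web1', 'mynet', aliases=['web']
+ ... )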
+ """
+ data = {
+ "Container": container,
+ "EndpointConfig": self.create_endpoint_config(
+ aliases=aliases, links=links, ipv4_address=ipv4_address,
+ ipv6_address=ipv6_address, link_local_ips=link_local_ips
+ ),
+ }
+
+ url = self._url("/networks/{0}/connect", net_id)
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+
+ @check_resource('container')
+ def disconnect_container_from_network(self, container, net_id,
+ force=False):
+ """
+ Disconnect a container from a network.
+
+ Args:
+ container (str): container ID or name to be disconnected from the
+ network
+ net_id (str): network ID
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+ """
+ data = {"Container": container}
+ if force:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion(
+ 'Forced disconnect was introduced in API 1.22'
+ )
+ data['Force'] = force
+ url = self._url("/networks/{0}/disconnect", net_id)
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
new file mode 100644
index 0000000..f6c0b13
--- /dev/null
+++ b/docker/api/plugin.py
@@ -0,0 +1,262 @@
+import six
+
+from .. import auth, utils
+
+
+class PluginApiMixin(object):
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def configure_plugin(self, name, options):
+ """
+ Configure a plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ options (dict): A key-value mapping of options
+
+ Returns:
+ ``True`` if successful
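+
+ Example:
+ A minimal sketch (uses the ``DEBUG`` option documented by the
+ ``vieux/sshfs`` plugin as an illustration):
+
+ >>> client.configure_plugin('vieux/sshfs:latest', {'DEBUG': '1'})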
+ """
+ url = self._url('/plugins/{0}/set', name)
+ data = options
+ if isinstance(data, dict):
+ data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def create_plugin(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/create')
+
+ with utils.create_archive(
+ root=plugin_data_dir, gzip=gzip,
+ files=set(utils.build.walk(plugin_data_dir, []))
+ ) as archv:
+ res = self._post(url, params={'name': name}, data=archv)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def disable_plugin(self, name):
+ """
+ Disable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/disable', name)
+ res = self._post(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def enable_plugin(self, name, timeout=0):
+ """
+ Enable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ timeout (int): Operation timeout (in seconds). Default: 0
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/enable', name)
+ params = {'timeout': timeout}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def inspect_plugin(self, name):
+ """
+ Retrieve plugin metadata.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ A dict containing plugin info
+ """
+ url = self._url('/plugins/{0}/json', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def pull_plugin(self, remote, privileges, name=None):
+ """
+ Pull and install a plugin. After the plugin is installed, it can be
+ enabled using :py:meth:`~enable_plugin`.
+
+ Args:
+ remote (string): Remote reference for the plugin to install.
+ The ``:latest`` tag is optional, and is the default if
+ omitted.
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+ name (string): Local name for the pulled plugin. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+ url = self._url('/plugins/pull')
+ params = {
+ 'remote': remote,
+ }
+ if name:
+ params['name'] = name
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
+
+ @utils.minimum_version('1.25')
+ def plugins(self):
+ """
+ Retrieve a list of installed plugins.
+
+ Returns:
+ A list of dicts, one per plugin
+ """
+ url = self._url('/plugins')
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def plugin_privileges(self, name):
+ """
+ Retrieve list of privileges to be granted to a plugin.
+
+ Args:
+ name (string): Name of the remote plugin to examine. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ A list of dictionaries representing the plugin's
+ permissions
+
+ """
+ params = {
+ 'remote': name,
+ }
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+
+ url = self._url('/plugins/privileges')
+ return self._result(
+ self._get(url, params=params, headers=headers), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def push_plugin(self, name):
+ """
+ Push a plugin to the registry.
+
+ Args:
+ name (string): Name of the plugin to upload. The ``:latest``
+ tag is optional, and is the default if omitted.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+ url = self._url('/plugins/{0}/push', name)
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ res = self._post(url, headers=headers)
+ self._raise_for_status(res)
+ return self._stream_helper(res, decode=True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def remove_plugin(self, name, force=False):
+ """
+ Remove an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to remove. The ``:latest``
+ tag is optional, and is the default if omitted.
+ force (bool): Disable the plugin before removing. This may
+ result in issues if the plugin is in use by a container.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}', name)
+ res = self._delete(url, params={'force': force})
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.26')
+ @utils.check_resource('name')
+ def upgrade_plugin(self, name, remote, privileges):
+ """
+ Upgrade an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to upgrade. The ``:latest``
+ tag is optional and is the default if omitted.
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ privileges (:py:class:`list`): A list of privileges the user
+ consents to grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+
+ url = self._url('/plugins/{0}/upgrade', name)
+ params = {
+ 'remote': remote,
+ }
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
diff --git a/docker/api/secret.py b/docker/api/secret.py
new file mode 100644
index 0000000..fa4c2ab
--- /dev/null
+++ b/docker/api/secret.py
@@ -0,0 +1,102 @@
+import base64
+
+import six
+
+from .. import errors
+from .. import utils
+
+
+class SecretApiMixin(object):
+ @utils.minimum_version('1.25')
+ def create_secret(self, name, data, labels=None, driver=None):
+ """
+ Create a secret
+
+ Args:
+ name (string): Name of the secret
+ data (bytes): Secret data to be stored
+ labels (dict): A mapping of labels to assign to the secret
+ driver (DriverConfig): A custom driver configuration. If
+ unspecified, the default ``internal`` driver will be used
+
+ Returns (dict): ID of the newly created secret
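+
+ Example:
+ A minimal sketch (assumes the Engine is a swarm manager; the
+ secret name and payload are hypothetical):
+
+ >>> client.create_secret('db_password', b's3cr3t')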
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ if driver is not None:
+ if utils.version_lt(self._version, '1.31'):
+ raise errors.InvalidVersion(
+ 'Secret driver is only available for API version >= 1.31'
+ )
+
+ body['Driver'] = driver
+
+ url = self._url('/secrets/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def inspect_secret(self, id):
+ """
+ Retrieve secret metadata
+
+ Args:
+ id (string): Full ID of the secret to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def remove_secret(self, id):
+ """
+ Remove a secret
+
+ Args:
+ id (string): Full ID of the secret to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def secrets(self, filters=None):
+ """
+ List secrets
+
+ Args:
+ filters (dict): A map of filters to process on the secrets
+ list. Available filters: ``names``
+
+ Returns (list): A list of secrets
+ """
+ url = self._url('/secrets')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/docker/api/service.py b/docker/api/service.py
new file mode 100644
index 0000000..03b0ca6
--- /dev/null
+++ b/docker/api/service.py
@@ -0,0 +1,444 @@
+from .. import auth, errors, utils
+from ..types import ServiceMode
+
+
+def _check_api_features(version, task_template, update_config, endpoint_spec):
+
+ def raise_version_error(param, min_version):
+ raise errors.InvalidVersion(
+ '{} is not supported in API version < {}'.format(
+ param, min_version
+ )
+ )
+
+ if update_config is not None:
+ if utils.version_lt(version, '1.25'):
+ if 'MaxFailureRatio' in update_config:
+ raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
+ if 'Monitor' in update_config:
+ raise_version_error('UpdateConfig.monitor', '1.25')
+
+ if utils.version_lt(version, '1.29'):
+ if 'Order' in update_config:
+ raise_version_error('UpdateConfig.order', '1.29')
+
+ if endpoint_spec is not None:
+ if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
+ if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
+ raise_version_error('EndpointSpec.Ports[].mode', '1.32')
+
+ if task_template is not None:
+ if 'ForceUpdate' in task_template and utils.version_lt(
+ version, '1.25'):
+ raise_version_error('force_update', '1.25')
+
+ if task_template.get('Placement'):
+ if utils.version_lt(version, '1.30'):
+ if task_template['Placement'].get('Platforms'):
+ raise_version_error('Placement.platforms', '1.30')
+ if utils.version_lt(version, '1.27'):
+ if task_template['Placement'].get('Preferences'):
+ raise_version_error('Placement.preferences', '1.27')
+
+ if task_template.get('ContainerSpec'):
+ container_spec = task_template.get('ContainerSpec')
+
+ if utils.version_lt(version, '1.25'):
+ if container_spec.get('TTY'):
+ raise_version_error('ContainerSpec.tty', '1.25')
+ if container_spec.get('Hostname') is not None:
+ raise_version_error('ContainerSpec.hostname', '1.25')
+ if container_spec.get('Hosts') is not None:
+ raise_version_error('ContainerSpec.hosts', '1.25')
+ if container_spec.get('Groups') is not None:
+ raise_version_error('ContainerSpec.groups', '1.25')
+ if container_spec.get('DNSConfig') is not None:
+ raise_version_error('ContainerSpec.dns_config', '1.25')
+ if container_spec.get('Healthcheck') is not None:
+ raise_version_error('ContainerSpec.healthcheck', '1.25')
+
+ if utils.version_lt(version, '1.28'):
+ if container_spec.get('ReadOnly') is not None:
+ raise_version_error('ContainerSpec.read_only', '1.28')
+ if container_spec.get('StopSignal') is not None:
+ raise_version_error('ContainerSpec.stop_signal', '1.28')
+
+ if utils.version_lt(version, '1.30'):
+ if container_spec.get('Configs') is not None:
+ raise_version_error('ContainerSpec.configs', '1.30')
+ if container_spec.get('Privileges') is not None:
+ raise_version_error('ContainerSpec.privileges', '1.30')
+
+ if utils.version_lt(version, '1.35'):
+ if container_spec.get('Isolation') is not None:
+ raise_version_error('ContainerSpec.isolation', '1.35')
+
+ if task_template.get('Resources'):
+ if utils.version_lt(version, '1.32'):
+ if task_template['Resources'].get('GenericResources'):
+ raise_version_error('Resources.generic_resources', '1.32')
+
+
+def _merge_task_template(current, override):
+ merged = current.copy()
+ if override is not None:
+ for ts_key, ts_value in override.items():
+ if ts_key == 'ContainerSpec':
+ if 'ContainerSpec' not in merged:
+ merged['ContainerSpec'] = {}
+ for cs_key, cs_value in override['ContainerSpec'].items():
+ if cs_value is not None:
+ merged['ContainerSpec'][cs_key] = cs_value
+ elif ts_value is not None:
+ merged[ts_key] = ts_value
+ return merged
+
+
+class ServiceApiMixin(object):
+ @utils.minimum_version('1.24')
+ def create_service(
+ self, task_template, name=None, labels=None, mode=None,
+ update_config=None, networks=None, endpoint_config=None,
+ endpoint_spec=None
+ ):
+ """
+ Create a service.
+
+ Args:
+ task_template (TaskTemplate): Specification of the task to start as
+ part of the new service.
+ name (string): User-defined name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+ A dictionary containing an ``ID`` key for the newly created
+ service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
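+
+ Example:
+ A minimal sketch (assumes swarm mode and a pullable
+ ``nginx:latest`` image):
+
+ >>> container_spec = docker.types.ContainerSpec(
+ image='nginx:latest'
+ )
+ >>> task_tmpl = docker.types.TaskTemplate(container_spec)
+ >>> client.create_service(task_tmpl, name='web')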
+ """
+
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec
+ )
+
+ url = self._url('/services/create')
+ headers = {}
+ image = task_template.get('ContainerSpec', {}).get('Image', None)
+ if image is None:
+ raise errors.DockerException(
+ 'Missing mandatory Image key in ContainerSpec'
+ )
+ if mode and not isinstance(mode, dict):
+ mode = ServiceMode(mode)
+
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+ if utils.version_lt(self._version, '1.25'):
+ networks = networks or task_template.pop('Networks', None)
+ data = {
+ 'Name': name,
+ 'Labels': labels,
+ 'TaskTemplate': task_template,
+ 'Mode': mode,
+ 'Networks': utils.convert_service_networks(networks),
+ 'EndpointSpec': endpoint_spec
+ }
+
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+
+ return self._result(
+ self._post_json(url, data=data, headers=headers), True
+ )
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('service')
+ def inspect_service(self, service, insert_defaults=None):
+ """
+ Return information about a service.
+
+ Args:
+ service (str): Service name or ID.
+ insert_defaults (boolean): If true, default values will be merged
+ into the service inspect output.
+
+ Returns:
+ (dict): A dictionary of the service's configuration.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/services/{0}', service)
+ params = {}
+ if insert_defaults is not None:
+ if utils.version_lt(self._version, '1.29'):
+ raise errors.InvalidVersion(
+ 'insert_defaults is not supported in API version < 1.29'
+ )
+ params['insertDefaults'] = insert_defaults
+
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('task')
+ def inspect_task(self, task):
+ """
+ Retrieve information about a task.
+
+ Args:
+ task (str): Task ID
+
+ Returns:
+ (dict): Information about the task.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/tasks/{0}', task)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('service')
+ def remove_service(self, service):
+ """
+ Stop and remove a service.
+
+ Args:
+ service (str): Service name or ID
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ url = self._url('/services/{0}', service)
+ resp = self._delete(url)
+ self._raise_for_status(resp)
+ return True
+
+ @utils.minimum_version('1.24')
+ def services(self, filters=None):
+ """
+ List services.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name`` , ``label`` and ``mode``.
+ Default: ``None``.
+
+ Returns:
+ A list of dictionaries containing data about each service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/services')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('service')
+ def service_logs(self, service, details=False, follow=False, stdout=False,
+ stderr=False, since=0, timestamps=False, tail='all',
+ is_tty=None):
+ """
+ Get log stream for a service.
+ Note: This endpoint works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ service (str): ID or name of the service
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+ is_tty (bool): Whether the service's :py:class:`ContainerSpec`
+ enables the TTY option. If omitted, the method will query
+ the Engine for the information, causing an additional
+ roundtrip.
+
+ Returns (generator): Logs for the service.
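+
+ Example:
+ A minimal sketch that tails a service's stdout (the service
+ name is hypothetical):
+
+ >>> logs = client.service_logs('web', stdout=True, follow=True)
+ >>> for line in logs:
+ ... print(line)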
+ """
+ params = {
+ 'details': details,
+ 'follow': follow,
+ 'stdout': stdout,
+ 'stderr': stderr,
+ 'since': since,
+ 'timestamps': timestamps,
+ 'tail': tail
+ }
+
+ url = self._url('/services/{0}/logs', service)
+ res = self._get(url, params=params, stream=True)
+ if is_tty is None:
+ is_tty = self.inspect_service(
+ service
+ )['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
+ return self._get_result_tty(True, res, is_tty)
+
+ @utils.minimum_version('1.24')
+ def tasks(self, filters=None):
+ """
+ Retrieve a list of tasks.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``service``, ``node``,
+ ``label`` and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/tasks')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.24')
+ @utils.check_resource('service')
+ def update_service(self, service, version, task_template=None, name=None,
+ labels=None, mode=None, update_config=None,
+ networks=None, endpoint_config=None,
+ endpoint_spec=None, fetch_current_spec=False):
+ """
+ Update a service.
+
+ Args:
+ service (string): A service identifier (either its name or service
+ ID).
+ version (int): The version number of the service object being
+ updated. This is required to avoid conflicting writes.
+ task_template (TaskTemplate): Specification of the updated task to
+ start as part of the service.
+ name (string): New name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``.
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+ fetch_current_spec (boolean): Use the undefined settings from the
+ current specification of the service. Default: ``False``
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
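+
+ Example:
+ A minimal sketch that renames a service while keeping its other
+ settings (the service name is hypothetical):
+
+ >>> spec = client.inspect_service('web')
+ >>> client.update_service('web', spec['Version']['Index'],
+ name='web2', fetch_current_spec=True)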
+ """
+
+ _check_api_features(
+ self._version, task_template, update_config, endpoint_spec
+ )
+
+ if fetch_current_spec:
+ inspect_defaults = True
+ if utils.version_lt(self._version, '1.29'):
+ inspect_defaults = None
+ current = self.inspect_service(
+ service, insert_defaults=inspect_defaults
+ )['Spec']
+
+ else:
+ current = {}
+
+ url = self._url('/services/{0}/update', service)
+ data = {}
+ headers = {}
+
+ data['Name'] = current.get('Name') if name is None else name
+
+ data['Labels'] = current.get('Labels') if labels is None else labels
+
+ if mode is not None:
+ if not isinstance(mode, dict):
+ mode = ServiceMode(mode)
+ data['Mode'] = mode
+ else:
+ data['Mode'] = current.get('Mode')
+
+ data['TaskTemplate'] = _merge_task_template(
+ current.get('TaskTemplate', {}), task_template
+ )
+
+ container_spec = data['TaskTemplate'].get('ContainerSpec', {})
+ image = container_spec.get('Image', None)
+ if image is not None:
+ registry, repo_name = auth.resolve_repository_name(image)
+ auth_header = auth.get_config_header(self, registry)
+ if auth_header:
+ headers['X-Registry-Auth'] = auth_header
+
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+ else:
+ data['UpdateConfig'] = current.get('UpdateConfig')
+
+ if networks is not None:
+ converted_networks = utils.convert_service_networks(networks)
+ if utils.version_lt(self._version, '1.25'):
+ data['Networks'] = converted_networks
+ else:
+ data['TaskTemplate']['Networks'] = converted_networks
+ elif utils.version_lt(self._version, '1.25'):
+ data['Networks'] = current.get('Networks')
+ elif data['TaskTemplate'].get('Networks') is None:
+ current_task_template = current.get('TaskTemplate', {})
+ current_networks = current_task_template.get('Networks')
+ if current_networks is None:
+ current_networks = current.get('Networks')
+ if current_networks is not None:
+ data['TaskTemplate']['Networks'] = current_networks
+
+ if endpoint_spec is not None:
+ data['EndpointSpec'] = endpoint_spec
+ else:
+ data['EndpointSpec'] = current.get('EndpointSpec')
+
+ resp = self._post_json(
+ url, data=data, params={'version': version}, headers=headers
+ )
+ self._raise_for_status(resp)
+ return True
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
new file mode 100644
index 0000000..04595da
--- /dev/null
+++ b/docker/api/swarm.py
@@ -0,0 +1,389 @@
+import logging
+from six.moves import http_client
+from .. import errors
+from .. import types
+from .. import utils
+
+log = logging.getLogger(__name__)
+
+
+class SwarmApiMixin(object):
+
+ def create_swarm_spec(self, *args, **kwargs):
+ """
+ Create a :py:class:`docker.types.SwarmSpec` instance that can be used
+ as the ``swarm_spec`` argument in
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
+
+ Args:
+ task_history_retention_limit (int): Maximum number of tasks
+ history stored.
+ snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Amount of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Amount of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for node certificates.
+ external_cas (:py:class:`list`): Configuration for forwarding
+ signing requests to an external certificate authority. Use
+ a list of :py:class:`docker.types.SwarmExternalCA`.
+ name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer whose purpose is to force swarm
+ to generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
+
+ Returns:
+ :py:class:`docker.types.SwarmSpec`
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> spec = client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ >>> client.init_swarm(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, swarm_spec=spec
+ )
+ """
+ ext_ca = kwargs.pop('external_ca', None)
+ if ext_ca:
+ kwargs['external_cas'] = [ext_ca]
+ return types.SwarmSpec(self._version, *args, **kwargs)
+
+ @utils.minimum_version('1.24')
+ def get_unlock_key(self):
+ """
+ Get the unlock key for this Swarm manager.
+
+ Returns:
+ A ``dict`` containing an ``UnlockKey`` member
+ """
+ return self._result(self._get(self._url('/swarm/unlockkey')), True)
+
+ @utils.minimum_version('1.24')
+ def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, swarm_spec=None):
+ """
+ Initialize a new Swarm using the current connected engine as the first
+ node.
+
+ Args:
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: None
+ listen_addr (string): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: '0.0.0.0:2377'
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ swarm_spec (dict): Configuration settings of the new Swarm. Use
+ ``APIClient.create_swarm_spec`` to generate a valid
+ configuration. Default: None
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ url = self._url('/swarm/init')
+ if swarm_spec is not None and not isinstance(swarm_spec, dict):
+ raise TypeError('swarm_spec must be a dictionary')
+ data = {
+ 'AdvertiseAddr': advertise_addr,
+ 'ListenAddr': listen_addr,
+ 'ForceNewCluster': force_new_cluster,
+ 'Spec': swarm_spec,
+ }
+ response = self._post_json(url, data=data)
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def inspect_swarm(self):
+ """
+ Retrieve low-level information about the current swarm.
+
+ Returns:
+ A dictionary containing data about the swarm.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/swarm')
+ return self._result(self._get(url), True)
+
+ @utils.check_resource('node_id')
+ @utils.minimum_version('1.24')
+ def inspect_node(self, node_id):
+ """
+ Retrieve low-level information about a swarm node
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A dictionary containing data about this node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/nodes/{0}', node_id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.24')
+ def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
+ advertise_addr=None):
+ """
+ Make this Engine join a swarm that has already been created.
+
+ Args:
+ remote_addrs (:py:class:`list`): Addresses of one or more manager
+ nodes already participating in the Swarm to join.
+ join_token (string): Secret token for joining this Swarm.
+ listen_addr (string): Listen address used for inter-manager
+ communication if the node gets promoted to manager, as well as
+ determining the networking interface used for the VXLAN Tunnel
+ Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ AdvertiseAddr is not specified, it will be automatically
+ detected when possible. Default: ``None``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ data = {
+ "RemoteAddrs": remote_addrs,
+ "ListenAddr": listen_addr,
+ "JoinToken": join_token,
+ "AdvertiseAddr": advertise_addr,
+ }
+ url = self._url('/swarm/join')
+ response = self._post_json(url, data=data)
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def leave_swarm(self, force=False):
+ """
+ Leave a swarm.
+
+ Args:
+ force (bool): Leave the swarm even if this node is a manager.
+ Default: ``False``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/swarm/leave')
+ response = self._post(url, params={'force': force})
+ # Ignore "this node is not part of a swarm" error
+ if force and response.status_code == http_client.NOT_ACCEPTABLE:
+ return True
+ # FIXME: Temporary workaround for 1.13.0-rc bug
+ # https://github.com/docker/docker/issues/29192
+ if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
+ return True
+ self._raise_for_status(response)
+ return True
+
+ @utils.minimum_version('1.24')
+ def nodes(self, filters=None):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of dictionaries containing data about each swarm node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/nodes')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+
+ return self._result(self._get(url, params=params), True)
+
+ @utils.check_resource('node_id')
+ @utils.minimum_version('1.24')
+ def remove_node(self, node_id, force=False):
+ """
+ Remove a node from the swarm.
+
+ Args:
+ node_id (string): ID of the node to be removed.
+ force (bool): Force remove an active node. Default: `False`
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node referenced doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ Returns:
+ `True` if the request was successful.
+ """
+ url = self._url('/nodes/{0}', node_id)
+ params = {
+ 'force': force
+ }
+ res = self._delete(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def unlock_swarm(self, key):
+ """
+ Unlock a locked swarm.
+
+ Args:
+ key (string): The unlock key as provided by
+ :py:meth:`get_unlock_key`
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the key argument is in an incompatible format
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ `True` if the request was successful.
+
+ Example:
+
+ >>> key = client.get_unlock_key()
+ >>> client.unlock_swarm(key)
+
+ """
+ if isinstance(key, dict):
+ if 'UnlockKey' not in key:
+ raise errors.InvalidArgument('Invalid unlock key format')
+ else:
+ key = {'UnlockKey': key}
+
+ url = self._url('/swarm/unlock')
+ res = self._post_json(url, data=key)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def update_node(self, node_id, version, node_spec=None):
+ """
+ Update the node's configuration
+
+ Args:
+ node_id (string): ID of the node to be updated.
+ version (int): The version number of the node object being
+ updated. This is required to avoid conflicting writes.
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ node_spec=node_spec)
+
+ """
+ url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
+ res = self._post_json(url, data=node_spec)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
+ rotate_manager_token=False):
+ """
+ Update the Swarm's configuration
+
+ Args:
+ version (int): The version number of the swarm object being
+ updated. This is required to avoid conflicting writes.
+ swarm_spec (dict): Configuration settings to update. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
+ generate a valid configuration. Default: ``None``.
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ url = self._url('/swarm/update')
+ response = self._post_json(url, data=swarm_spec, params={
+ 'rotateWorkerToken': rotate_worker_token,
+ 'rotateManagerToken': rotate_manager_token,
+ 'version': version
+ })
+ self._raise_for_status(response)
+ return True
diff --git a/docker/api/volume.py b/docker/api/volume.py
new file mode 100644
index 0000000..900a608
--- /dev/null
+++ b/docker/api/volume.py
@@ -0,0 +1,161 @@
+from .. import errors
+from .. import utils
+
+
+class VolumeApiMixin(object):
+ def volumes(self, filters=None):
+ """
+ List volumes currently registered by the docker daemon. Similar to the
+ ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (dict): Dictionary with list of volume objects as value of the
+ ``Volumes`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.volumes()
+ {u'Volumes': [{u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'},
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
+ u'Name': u'baz'}]}
+ """
+
+ params = {
+ 'filters': utils.convert_filters(filters) if filters else None
+ }
+ url = self._url('/volumes')
+ return self._result(self._get(url, params=params), True)
+
+ def create_volume(self, name=None, driver=None, driver_opts=None,
+ labels=None):
+ """
+ Create and register a named volume
+
+ Args:
+ name (str): Name of the volume
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (dict): The created volume reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = cli.create_volume(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+ >>> print(volume)
+ {u'Driver': u'local',
+ u'Labels': {u'key': u'value'},
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar',
+ u'Scope': u'local'}
+
+ """
+ url = self._url('/volumes/create')
+ if driver_opts is not None and not isinstance(driver_opts, dict):
+ raise TypeError('driver_opts must be a dictionary')
+
+ data = {
+ 'Name': name,
+ 'Driver': driver,
+ 'DriverOpts': driver_opts,
+ }
+
+ if labels is not None:
+ if utils.compare_version('1.23', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'volume labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ return self._result(self._post_json(url, data=data), True)
+
+ def inspect_volume(self, name):
+ """
+ Retrieve volume info by name.
+
+ Args:
+ name (str): volume name
+
+ Returns:
+ (dict): Volume information dictionary
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.inspect_volume('foobar')
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'}
+
+ """
+ url = self._url('/volumes/{0}', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def prune_volumes(self, filters=None):
+ """
+ Delete unused volumes
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted volume names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/volumes/prune')
+ return self._result(self._post(url, params=params), True)
+
+ def remove_volume(self, name, force=False):
+ """
+ Remove a volume. Similar to the ``docker volume rm`` command.
+
+ Args:
+ name (str): The volume's name
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
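+
+ Example (illustrative; assumes an ``APIClient`` instance ``cli``):
+
+ >>> cli.remove_volume('foobar', force=True)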
+ """
+ params = {}
+ if force:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force removal was introduced in API 1.25'
+ )
+ params = {'force': force}
+
+ url = self._url('/volumes/{0}', name, params=params)
+ resp = self._delete(url)
+ self._raise_for_status(resp)
diff --git a/docker/auth.py b/docker/auth.py
new file mode 100644
index 0000000..9635f93
--- /dev/null
+++ b/docker/auth.py
@@ -0,0 +1,303 @@
+import base64
+import json
+import logging
+
+import dockerpycreds
+import six
+
+from . import errors
+from .utils import config
+
+INDEX_NAME = 'docker.io'
+INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+TOKEN_USERNAME = '<token>'
+
+log = logging.getLogger(__name__)
+
+
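+# For example (illustrative): resolve_repository_name('ubuntu') returns
+# ('docker.io', 'ubuntu'), while resolve_repository_name('quay.io/foo/bar')
+# returns ('quay.io', 'foo/bar').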
+def resolve_repository_name(repo_name):
+ if '://' in repo_name:
+ raise errors.InvalidRepository(
+ 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ )
+
+ index_name, remote_name = split_repo_name(repo_name)
+ if index_name[0] == '-' or index_name[-1] == '-':
+ raise errors.InvalidRepository(
+ 'Invalid index name ({0}). Cannot begin or end with a'
+ ' hyphen.'.format(index_name)
+ )
+ return resolve_index_name(index_name), remote_name
+
+
+def resolve_index_name(index_name):
+ index_name = convert_to_hostname(index_name)
+ if index_name == 'index.' + INDEX_NAME:
+ index_name = INDEX_NAME
+ return index_name
+
+
+def get_config_header(client, registry):
+ log.debug('Looking for auth config')
+ if not client._auth_configs:
+ log.debug(
+ "No auth config in memory - loading from filesystem"
+ )
+ client._auth_configs = load_config()
+ authcfg = resolve_authconfig(
+ client._auth_configs, registry, credstore_env=client.credstore_env
+ )
+ # Do not fail here if no authentication exists for this
+ # specific registry, as we can have a read-only pull. Just
+ # set the header if we can.
+ if authcfg:
+ log.debug('Found auth config')
+ # auth_config needs to be a dict in the format used by
+ # auth.py: username, password, serveraddress, email
+ return encode_header(authcfg)
+ log.debug('No auth config found')
+ return None
+
+
+def split_repo_name(repo_name):
+ parts = repo_name.split('/', 1)
+ if len(parts) == 1 or (
+ '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+ ):
+ # This is a docker index repo (ex: username/foobar or ubuntu)
+ return INDEX_NAME, repo_name
+ return tuple(parts)
+
+
+def get_credential_store(authconfig, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = 'https://index.docker.io/v1/'
+
+ return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
+ 'credsStore'
+ )
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the config
+ with full URLs are stripped down to hostnames before checking for a match.
+ Returns None if no match was found.
+ """
+
+ if 'credHelpers' in authconfig or 'credsStore' in authconfig:
+ store_name = get_credential_store(authconfig, registry)
+ if store_name is not None:
+ log.debug(
+ 'Using credentials store "{0}"'.format(store_name)
+ )
+ cfg = _resolve_authconfig_credstore(
+ authconfig, registry, store_name, env=credstore_env
+ )
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug("Looking for auth entry for {0}".format(repr(registry)))
+
+ authdict = authconfig.get('auths', {})
+ if registry in authdict:
+ log.debug("Found {0}".format(repr(registry)))
+ return authdict[registry]
+
+ for key, conf in six.iteritems(authdict):
+ if resolve_index_name(key) == registry:
+ log.debug("Found {0}".format(repr(key)))
+ return conf
+
+ log.debug("No entry found")
+ return None
+
+
+def _resolve_authconfig_credstore(authconfig, registry, credstore_name,
+ env=None):
+ if not registry or registry == INDEX_NAME:
+ # The ecosystem is inconsistent about index.docker.io vs.
+ # docker.io - in that case, it seems the full URL is necessary.
+ registry = INDEX_URL
+ log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ store = dockerpycreds.Store(credstore_name, environment=env)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except dockerpycreds.CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except dockerpycreds.StoreError as e:
+ raise errors.DockerException(
+ 'Credentials store error: {0}'.format(repr(e))
+ )
+
+
+def convert_to_hostname(url):
+ return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
+
+
+def decode_auth(auth):
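+ # e.g. (illustrative) decode_auth('dXNlcjpwYXNz') == ('user', 'pass'),
+ # since 'dXNlcjpwYXNz' is the base64 encoding of 'user:pass'.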
+ if isinstance(auth, six.string_types):
+ auth = auth.encode('ascii')
+ s = base64.b64decode(auth)
+ login, pwd = s.split(b':', 1)
+ return login.decode('utf8'), pwd.decode('utf8')
+
+
+def encode_header(auth):
+ auth_json = json.dumps(auth).encode('ascii')
+ return base64.urlsafe_b64encode(auth_json)
+
+
+def parse_auth(entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ conf = {}
+ for registry, entry in six.iteritems(entries):
+ if not isinstance(entry, dict):
+ log.debug(
+ 'Config entry for key {0} is not auth config'.format(registry)
+ )
+ # We sometimes fall back to parsing the whole config as if it was
+ # the auth config by itself, for legacy purposes. In that case, we
+ # fail silently and return an empty conf if any of the keys is not
+ # formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {0}'.format(registry)
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug('Found an IdentityToken entry for registry {0}'.format(
+ registry
+ ))
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token, skip.
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug(
+ 'Auth data for {0} is absent. Client might be using a '
+ 'credentials store instead.'.format(registry)
+ )
+ conf[registry] = {}
+ continue
+
+ username, password = decode_auth(entry['auth'])
+ log.debug(
+ 'Found entry (registry={0}, username={1})'
+ .format(repr(registry), repr(username))
+ )
+
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+
+def load_config(config_path=None, config_dict=None):
+ """
+ Loads authentication data from a Docker configuration file in the given
+ root directory, or from the explicit ``config_path`` if one is passed.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment variable >
+ ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return {}
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (IOError, KeyError, ValueError) as e:
+ # The new Docker config file is likely missing or in an
+ # unknown format; fall back to reading the old location and
+ # format.
+ log.debug(e)
+ return _load_legacy_config(config_file)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return res
+
+ log.debug(
+ "Couldn't find auth-related section; attempting to interpret "
+ "as auth-only file"
+ )
+ return {'auths': parse_auth(config_dict)}
+
+
+def _load_legacy_config(config_file):
+ log.debug("Attempting to parse legacy auth file format")
+ try:
+ data = []
+ with open(config_file) as f:
+ for line in f.readlines():
+ data.append(line.strip().split(' = ')[1])
+ if len(data) < 2:
+ # Not enough data
+ raise errors.InvalidConfigFile(
+ 'Invalid or empty configuration file!'
+ )
+
+ username, password = decode_auth(data[0])
+ return {'auths': {
+ INDEX_NAME: {
+ 'username': username,
+ 'password': password,
+ 'email': data[1],
+ 'serveraddress': INDEX_URL,
+ }
+ }}
+ except Exception as e:
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/docker/client.py b/docker/client.py
new file mode 100644
index 0000000..8d4a52b
--- /dev/null
+++ b/docker/client.py
@@ -0,0 +1,208 @@
+from .api.client import APIClient
+from .constants import DEFAULT_TIMEOUT_SECONDS
+from .models.configs import ConfigCollection
+from .models.containers import ContainerCollection
+from .models.images import ImageCollection
+from .models.networks import NetworkCollection
+from .models.nodes import NodeCollection
+from .models.plugins import PluginCollection
+from .models.secrets import SecretCollection
+from .models.services import ServiceCollection
+from .models.swarm import Swarm
+from .models.volumes import VolumeCollection
+from .utils import kwargs_from_env
+
+
+class DockerClient(object):
+ """
+ A client for communicating with a Docker server.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
+ """
+ def __init__(self, *args, **kwargs):
+ self.api = APIClient(*args, **kwargs)
+
+ @classmethod
+ def from_env(cls, **kwargs):
+ """
+ Return a client configured from environment variables.
+
+ The environment variables used are the same as those used by the
+ Docker command-line client. They are:
+
+ .. envvar:: DOCKER_HOST
+
+ The URL to the Docker host.
+
+ .. envvar:: DOCKER_TLS_VERIFY
+
+ Verify the host against a CA certificate.
+
+ .. envvar:: DOCKER_CERT_PATH
+
+ A path to a directory containing TLS certificates to use when
+ connecting to the Docker host.
+
+ Args:
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
+ timeout (int): Default timeout for API calls, in seconds.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+ environment (dict): The environment to read environment variables
+ from. Default: the value of ``os.environ``
+ credstore_env (dict): Override environment variables when calling
+ the credential store process.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.from_env()
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
+ timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
+ version = kwargs.pop('version', None)
+ return cls(
+ timeout=timeout, version=version, **kwargs_from_env(**kwargs)
+ )
+
+ # Resources
+ @property
+ def configs(self):
+ """
+ An object for managing configs on the server. See the
+ :doc:`configs documentation <configs>` for full details.
+ """
+ return ConfigCollection(client=self)
+
+ @property
+ def containers(self):
+ """
+ An object for managing containers on the server. See the
+ :doc:`containers documentation <containers>` for full details.
+ """
+ return ContainerCollection(client=self)
+
+ @property
+ def images(self):
+ """
+ An object for managing images on the server. See the
+ :doc:`images documentation <images>` for full details.
+ """
+ return ImageCollection(client=self)
+
+ @property
+ def networks(self):
+ """
+ An object for managing networks on the server. See the
+ :doc:`networks documentation <networks>` for full details.
+ """
+ return NetworkCollection(client=self)
+
+ @property
+ def nodes(self):
+ """
+ An object for managing nodes on the server. See the
+ :doc:`nodes documentation <nodes>` for full details.
+ """
+ return NodeCollection(client=self)
+
+ @property
+ def plugins(self):
+ """
+ An object for managing plugins on the server. See the
+ :doc:`plugins documentation <plugins>` for full details.
+ """
+ return PluginCollection(client=self)
+
+ @property
+ def secrets(self):
+ """
+ An object for managing secrets on the server. See the
+ :doc:`secrets documentation <secrets>` for full details.
+ """
+ return SecretCollection(client=self)
+
+ @property
+ def services(self):
+ """
+ An object for managing services on the server. See the
+ :doc:`services documentation <services>` for full details.
+ """
+ return ServiceCollection(client=self)
+
+ @property
+ def swarm(self):
+ """
+ An object for managing a swarm on the server. See the
+ :doc:`swarm documentation <swarm>` for full details.
+ """
+ return Swarm(client=self)
+
+ @property
+ def volumes(self):
+ """
+ An object for managing volumes on the server. See the
+ :doc:`volumes documentation <volumes>` for full details.
+ """
+ return VolumeCollection(client=self)
+
+ # Top-level methods
+ def events(self, *args, **kwargs):
+ return self.api.events(*args, **kwargs)
+ events.__doc__ = APIClient.events.__doc__
+
+ def df(self):
+ return self.api.df()
+ df.__doc__ = APIClient.df.__doc__
+
+ def info(self, *args, **kwargs):
+ return self.api.info(*args, **kwargs)
+ info.__doc__ = APIClient.info.__doc__
+
+ def login(self, *args, **kwargs):
+ return self.api.login(*args, **kwargs)
+ login.__doc__ = APIClient.login.__doc__
+
+ def ping(self, *args, **kwargs):
+ return self.api.ping(*args, **kwargs)
+ ping.__doc__ = APIClient.ping.__doc__
+
+ def version(self, *args, **kwargs):
+ return self.api.version(*args, **kwargs)
+ version.__doc__ = APIClient.version.__doc__
+
+ def close(self):
+ return self.api.close()
+ close.__doc__ = APIClient.close.__doc__
+
+ def __getattr__(self, name):
+ s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ # If a user calls a method that exists on APIClient, point them
+ # at the low-level API instead.
+ if hasattr(APIClient, name):
+ s.append("In Docker SDK for Python 2.0, this method is now on the "
+ "object APIClient. See the low-level API section of the "
+ "documentation for more details.")
+ raise AttributeError(' '.join(s))
+
+
+from_env = DockerClient.from_env
diff --git a/docker/constants.py b/docker/constants.py
new file mode 100644
index 0000000..7565a76
--- /dev/null
+++ b/docker/constants.py
@@ -0,0 +1,20 @@
+import sys
+from .version import version
+
+DEFAULT_DOCKER_API_VERSION = '1.35'
+MINIMUM_DOCKER_API_VERSION = '1.21'
+DEFAULT_TIMEOUT_SECONDS = 60
+STREAM_HEADER_SIZE_BYTES = 8
+CONTAINER_LIMITS_KEYS = [
+ 'memory', 'memswap', 'cpushares', 'cpusetcpus'
+]
+
+INSECURE_REGISTRY_DEPRECATION_WARNING = \
+ 'The `insecure_registry` argument to {} ' \
+ 'is deprecated and non-functional. Please remove it.'
+
+IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+
+DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
+DEFAULT_NUM_POOLS = 25
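+# 2 MiB (1024 * 2048 bytes); the default chunk size for streamed data.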
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
diff --git a/docker/errors.py b/docker/errors.py
new file mode 100644
index 0000000..0253695
--- /dev/null
+++ b/docker/errors.py
@@ -0,0 +1,162 @@
+import requests
+
+
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
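+
+ Example (an illustrative sketch; any more specific subclass,
+ such as ``NotFound``, is caught as well):
+
+ >>> import docker
+ >>> client = docker.from_env()
+ >>> try:
+ ... client.containers.get('does-not-exist')
+ ... except docker.errors.DockerException as e:
+ ... print('Docker error: {0}'.format(e))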
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = (response.content or '').strip()
+ cls = APIError
+ if response.status_code == 404:
+ if explanation and ('No such image' in str(explanation) or
+ 'not found: does not exist or no pull access'
+ in str(explanation) or
+ 'repository does not exist' in str(explanation)):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise cls(e, response=response, explanation=explanation)
+
+
+class APIError(requests.exceptions.HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
+ # requests 1.2 supports response as a keyword argument, but
+ # requests 1.1 doesn't
+ super(APIError, self).__init__(message)
+ self.response = response
+ self.explanation = explanation
+
+ def __str__(self):
+ message = super(APIError, self).__str__()
+
+ if self.is_client_error():
+ message = '{0} Client Error: {1}'.format(
+ self.response.status_code, self.response.reason)
+
+ elif self.is_server_error():
+ message = '{0} Server Error: {1}'.format(
+ self.response.status_code, self.response.reason)
+
+ if self.explanation:
+ message = '{0} ("{1}")'.format(message, self.explanation)
+
+ return message
+
+ @property
+ def status_code(self):
+ if self.response is not None:
+ return self.response.status_code
+
+ def is_client_error(self):
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
+
+ def is_server_error(self):
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
+
+
+class NotFound(APIError):
+ pass
+
+
+class ImageNotFound(NotFound):
+ pass
+
+
+class InvalidVersion(DockerException):
+ pass
+
+
+class InvalidRepository(DockerException):
+ pass
+
+
+class InvalidConfigFile(DockerException):
+ pass
+
+
+class InvalidArgument(DockerException):
+ pass
+
+
+class DeprecatedMethod(DockerException):
+ pass
+
+
+class TLSParameterError(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg + (". TLS configurations should map to the Docker "
+ "CLI client configurations. See "
+ "https://docs.docker.com/engine/articles/https/ "
+ "for API details.")
+
+
+class NullResource(DockerException, ValueError):
+ pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+
+ err = ": {}".format(stderr) if stderr is not None else ""
+ msg = ("Command '{}' in image '{}' returned non-zero exit "
+ "status {}{}").format(command, image, exit_status, err)
+
+ super(ContainerError, self).__init__(msg)
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(DockerException):
+ def __init__(self, reason, build_log):
+ super(BuildError, self).__init__(reason)
+ self.msg = reason
+ self.build_log = build_log
+
+
+class ImageLoadError(DockerException):
+ pass
+
+
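+# For example (illustrative): create_unexpected_kwargs_error('run',
+# {'foo': 1}) returns TypeError("run() got an unexpected keyword
+# argument 'foo'").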
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
+ text = ["{}() ".format(name)]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
diff --git a/docker/models/__init__.py b/docker/models/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/docker/models/__init__.py
diff --git a/docker/models/configs.py b/docker/models/configs.py
new file mode 100644
index 0000000..7f23f65
--- /dev/null
+++ b/docker/models/configs.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Config(Model):
+ """A config."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this config.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If config failed to remove.
+ """
+ return self.client.api.remove_config(self.id)
+
+
+class ConfigCollection(Collection):
+ """Configs on the Docker server."""
+ model = Config
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_config(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_config.__doc__
+
+ def get(self, config_id):
+ """
+ Get a config.
+
+ Args:
+ config_id (str): Config ID.
+
+ Returns:
+ (:py:class:`Config`): The config.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the config does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_config(config_id))
+
+ def list(self, **kwargs):
+ """
+ List configs. Similar to the ``docker config ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Config`): The configs.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.configs(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/containers.py b/docker/models/containers.py
new file mode 100644
index 0000000..b33a718
--- /dev/null
+++ b/docker/models/containers.py
@@ -0,0 +1,1068 @@
+import copy
+import ntpath
+from collections import namedtuple
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import (
+ ContainerError, DockerException, ImageNotFound,
+ NotFound, create_unexpected_kwargs_error
+)
+from ..types import HostConfig
+from ..utils import version_gte
+from .images import Image
+from .resource import Collection, Model
+
+
+class Container(Model):
+
+ @property
+ def name(self):
+ """
+ The name of the container.
+ """
+ if self.attrs.get('Name') is not None:
+ return self.attrs['Name'].lstrip('/')
+
+ @property
+ def image(self):
+ """
+ The image of the container.
+ """
+ image_id = self.attrs.get('ImageID', self.attrs['Image'])
+ if image_id is None:
+ return None
+ return self.client.images.get(image_id.split(':')[1])
+
+ @property
+ def labels(self):
+ """
+ The labels of a container as dictionary.
+ """
+ try:
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+ except KeyError:
+ raise DockerException(
+ 'Label data is not available for sparse objects. Call reload()'
+ ' to retrieve all information'
+ )
+
+ @property
+ def status(self):
+ """
+ The status of the container. For example, ``running``, or ``exited``.
+ """
+ if isinstance(self.attrs['State'], dict):
+ return self.attrs['State']['Status']
+ return self.attrs['State']
+
+ def attach(self, **kwargs):
+ """
+ Attach to this container.
+
+ :py:meth:`logs` is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach(self.id, **kwargs)
+
+ def attach_socket(self, **kwargs):
+ """
+ Like :py:meth:`attach`, but returns the underlying socket-like object
+ for the HTTP request.
+
+ Args:
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach_socket(self.id, **kwargs)
+
+ def commit(self, repository=None, tag=None, **kwargs):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ resp = self.client.api.commit(self.id, repository=repository, tag=tag,
+ **kwargs)
+ return self.client.images.get(resp['Id'])
+
+ def diff(self):
+ """
+ Inspect changes on a container's filesystem.
+
+ Returns:
+ (list): A list of dicts with ``Path`` and ``Kind`` keys.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.diff(self.id)
+
+ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
+ privileged=False, user='', detach=False, stream=False,
+ socket=False, environment=None, workdir=None):
+ """
+ Run a command inside this container. Similar to
+ ``docker exec``.
+
+ Args:
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ stream (bool): Stream response data. Default: False
+ socket (bool): Return the connection socket to allow custom
+ read/write operations. Default: False
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ workdir (str): Path to working directory for this exec session
+
+ Returns:
+ (ExecResult): A tuple of (exit_code, output)
+ exit_code: (int):
+ Exit code for the executed command or ``None`` if
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator or str):
+ If ``stream=True``, a generator yielding response chunks.
+ If ``socket=True``, a socket object for the connection.
+ A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
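+
+ Example (an illustrative sketch; assumes a running container):
+
+ >>> exit_code, output = container.exec_run('echo hello')
+ >>> (exit_code, output)
+ (0, b'hello\\n')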
+ """
+ resp = self.client.api.exec_create(
+ self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
+ privileged=privileged, user=user, environment=environment,
+ workdir=workdir
+ )
+ exec_output = self.client.api.exec_start(
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ )
+ if socket or stream:
+ return ExecResult(None, exec_output)
+
+ return ExecResult(
+ self.client.api.exec_inspect(resp['Id'])['ExitCode'],
+ exec_output
+ )
+
+ def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Export the contents of the container's filesystem as a tar archive.
+
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): A raw stream of the filesystem tar archive.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.export(self.id, chunk_size)
+
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Retrieve a file or folder from the container in the form of a tar
+ archive.
+
+ Args:
+ path (str): Path to the file or folder to retrieve
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.get_archive(self.id, path, chunk_size)
+
+ def kill(self, signal=None):
+ """
+ Kill or send a signal to the container.
+
+ Args:
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self.client.api.kill(self.id, signal=signal)
+
+ def logs(self, **kwargs):
+ """
+ Get logs from this container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output the specified number of lines at the
+ end of logs. Either an integer number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+ until (datetime or int): Show logs that occurred before the given
+ datetime or integer epoch (in seconds)
+
+ Returns:
+ (generator or str): Logs from the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
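+
+ Example (an illustrative sketch; assumes a running container):
+
+ >>> for line in container.logs(stream=True, follow=True):
+ ... print(line)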
+ """
+ return self.client.api.logs(self.id, **kwargs)
+
+ def pause(self):
+ """
+ Pauses all processes within this container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.pause(self.id)
+
+ def put_archive(self, path, data):
+ """
+ Insert a file or folder in this container using a tar archive as
+ source.
+
+ Args:
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`~docker.errors.APIError` If an error occurs.
+ """
+ return self.client.api.put_archive(self.id, path, data)
+
+ def remove(self, **kwargs):
+ """
+ Remove this container. Similar to the ``docker rm`` command.
+
+ Args:
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_container(self.id, **kwargs)
+
+ def rename(self, name):
+ """
+ Rename this container. Similar to the ``docker rename`` command.
+
+ Args:
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.rename(self.id, name)
+
+ def resize(self, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.resize(self.id, height, width)
+
+ def restart(self, **kwargs):
+ """
+ Restart this container. Similar to the ``docker restart`` command.
+
+ Args:
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.restart(self.id, **kwargs)
+
+ def start(self, **kwargs):
+ """
+ Start this container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.start(self.id, **kwargs)
+
+ def stats(self, **kwargs):
+ """
+ Stream statistics for this container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stats(self.id, **kwargs)
+
+ def stop(self, **kwargs):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stop(self.id, **kwargs)
+
+ def top(self, **kwargs):
+ """
+ Display the running processes of the container.
+
+ Args:
+ ps_args (str): Optional arguments to pass to ``ps`` (e.g. ``aux``)
+
+ Returns:
+ (dict): The output of ``top``, a dict with ``Titles`` and
+ ``Processes`` keys.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.top(self.id, **kwargs)
+
+ def unpause(self):
+ """
+ Unpause all processes within the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.unpause(self.id)
+
+ def update(self, **kwargs):
+ """
+ Update the resource configuration of this container.
+
+ Args:
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.update_container(self.id, **kwargs)
+
+ def wait(self, **kwargs):
+ """
+ Block until the container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ timeout (int): Request timeout
+ condition (str): Wait until a container state reaches the given
+ condition, either ``not-running`` (default), ``next-exit``,
+ or ``removed``
+
+ Returns:
+ (dict): The API's response as a Python dictionary, including
+ the container's exit code under the ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
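+
+ Example (illustrative; the exit code depends on the container):
+
+ >>> container.wait()
+ {'StatusCode': 0}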
+ """
+ return self.client.api.wait(self.id, **kwargs)
+
+
+class ContainerCollection(Collection):
+ model = Container
+
+ def run(self, image, command=None, stdout=True, stderr=False,
+ remove=False, **kwargs):
+ """
+ Run a container. By default, it will wait for the container to finish
+ and return its logs, similar to ``docker run``.
+
+ If the ``detach`` argument is ``True``, it will start the container
+ and immediately return a :py:class:`Container` object, similar to
+ ``docker run -d``.
+
+ Example:
+ Run a container and get its output:
+
+ >>> import docker
+ >>> client = docker.from_env()
+ >>> client.containers.run('alpine', 'echo hello world')
+ b'hello world\\n'
+
+ Run a container and detach:
+
+ >>> container = client.containers.run('bfirsh/reticulate-splines',
+ detach=True)
+ >>> container.logs()
+ 'Reticulating spline 1...\\nReticulating spline 2...\\n'
+
+ Args:
+ image (str): The image to run.
+ command (str or list): The command to run in the container.
+ auto_remove (bool): Enable auto-removal of the container on the
+ daemon side when the container's process exits.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_count (int): Number of usable CPUs (Windows only).
+ cpu_percent (int): Usable percentage of the available CPUs
+ (Windows only).
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ detach (bool): Run container in the background and return a
+ :py:class:`Container` object.
+ device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+ apply to the container.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: ``[{"Path": "device_path", "Rate": rate}]``
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file.
+ dns_search (:py:class:`list`): DNS search domains.
+ domainname (str): The domain name to use for the container.
+ entrypoint (str or list): The entrypoint for the container.
+ environment (dict or list): Environment variables to set inside
+ the container, as a dictionary or a list of strings in the
+ format ``["SOMEVARIABLE=xxx"]``.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+ hostname (str): Optional hostname for the container.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ mac_address (str): MAC address to assign to the container.
+ mem_limit (int or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ mounts (:py:class:`list`): Specification for mounts to be added to
+ the container. More powerful alternative to ``volumes``. Each
+ item in the list is expected to be a
+ :py:class:`docker.types.Mount` object.
+ name (str): The name for this container.
+ nano_cpus (int): CPU quota in units of 1e-9 CPUs.
+ network (str): Name of the network this container will be connected
+ to at creation time. You can connect to additional networks
+ using :py:meth:`Network.connect`. Incompatible with
+ ``network_mode``.
+ network_disabled (bool): Disable networking.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container
+ on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+
+ Incompatible with ``network``.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ Only used if the method needs to pull the requested image.
+ ports (dict): Ports to bind inside the container.
+
+ The keys of the dictionary are the ports to bind inside the
+ container, either as an integer or a string in the form
+ ``port/protocol``, where the protocol is either ``tcp`` or
+ ``udp``.
+
+ The values of the dictionary are the corresponding ports to
+ open on the host, which can be either:
+
+ - The port number, as an integer. For example,
+ ``{'2222/tcp': 3333}`` will expose port 2222 inside the
+ container as port 3333 on the host.
+ - ``None``, to assign a random host port. For example,
+ ``{'2222/tcp': None}``.
+ - A tuple of ``(address, port)`` if you want to specify the
+ host interface. For example,
+ ``{'1111/tcp': ('127.0.0.1', 1111)}``.
+ - A list of integers, if you want to bind multiple host ports
+ to a single container port. For example,
+ ``{'1111/tcp': [1234, 4567]}``.
+
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ remove (bool): Remove the container when it has finished running.
+ Default: ``False``.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure``, or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+
+ For example:
+ ``{"Name": "on-failure", "MaximumRetryCount": 5}``
+
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ stdin_open (bool): Keep ``STDIN`` open even if not attached.
+ stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
+ Default: ``True``.
+ stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
+ Default: ``False``.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ stream (bool): If true and ``detach`` is false, return a log
+ generator instead of a string. Ignored if ``detach`` is true.
+ Default: ``False``.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ tty (bool): Allocate a pseudo-TTY.
+ ulimits (:py:class:`list`): Ulimits to set inside the container, as
+ a list of dicts.
+ user (str or int): Username or UID to run commands as inside the
+ container.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volume_driver (str): The name of a volume driver/plugin.
+ volumes (dict or list): A dictionary to configure volumes mounted
+ inside the container. The key is either the host path or a
+ volume name, and the value is a dictionary with the keys:
+
+ - ``bind`` The path to mount the volume inside the container
+ - ``mode`` Either ``rw`` to mount the volume read/write, or
+ ``ro`` to mount it read-only.
+
+ For example:
+
+ .. code-block:: python
+
+ {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
+ '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ working_dir (str): Path to the working directory.
+ runtime (str): Runtime to use with this container.
+
+ Returns:
+ The container logs, either ``STDOUT``, ``STDERR``, or both,
+ depending on the value of the ``stdout`` and ``stderr`` arguments.
+
+ ``STDOUT`` and ``STDERR`` may be read only if either the
+ ``json-file`` or ``journald`` logging driver is used. Thus, if
+ neither of these drivers is used, a ``None`` object is returned
+ instead. See the
+ `Engine API documentation
+ <https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
+ for full details.
+
+ If ``detach`` is ``True``, a :py:class:`Container` object is
+ returned instead.
+
+ Raises:
+ :py:class:`docker.errors.ContainerError`
+ If the container exits with a non-zero exit code and
+ ``detach`` is ``False``.
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ stream = kwargs.pop('stream', False)
+ detach = kwargs.pop('detach', False)
+ platform = kwargs.pop('platform', None)
+
+ if detach and remove:
+ if version_gte(self.client.api._version, '1.25'):
+ kwargs["auto_remove"] = True
+ else:
+ raise RuntimeError("The options 'detach' and 'remove' cannot "
+ "be used together in api versions < 1.25.")
+
+ if kwargs.get('network') and kwargs.get('network_mode'):
+ raise RuntimeError(
+ 'The options "network" and "network_mode" can not be used '
+ 'together.'
+ )
+
+ try:
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+ except ImageNotFound:
+ self.client.images.pull(image, platform=platform)
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+
+ container.start()
+
+ if detach:
+ return container
+
+ logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
+
+ out = None
+ if logging_driver == 'json-file' or logging_driver == 'journald':
+ out = container.logs(
+ stdout=stdout, stderr=stderr, stream=True, follow=True
+ )
+
+ exit_status = container.wait()['StatusCode']
+ if exit_status != 0:
+ out = None
+ if not kwargs.get('auto_remove'):
+ out = container.logs(stdout=False, stderr=True)
+
+ if remove:
+ container.remove()
+ if exit_status != 0:
+ raise ContainerError(
+ container, exit_status, command, image, out
+ )
+
+ return out if stream or out is None else b''.join(
+ [line for line in out]
+ )
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a container without starting it. Similar to ``docker create``.
+
+ Takes the same arguments as :py:meth:`run`, except for ``stdout``,
+ ``stderr``, and ``remove``.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
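+
+ Example (an illustrative sketch):
+
+ >>> container = client.containers.create('alpine', 'echo hello')
+ >>> container.start()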
+ """
+ if isinstance(image, Image):
+ image = image.id
+ kwargs['image'] = image
+ kwargs['command'] = command
+ kwargs['version'] = self.client.api._version
+ create_kwargs = _create_container_args(kwargs)
+ resp = self.client.api.create_container(**create_kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, container_id):
+ """
+ Get a container by name or ID.
+
+ Args:
+ container_id (str): Container name or ID.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the container does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.inspect_container(container_id)
+ return self.prepare_model(resp)
+
+ def list(self, all=False, before=None, filters=None, limit=-1, since=None,
+ sparse=False, ignore_removed=False):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ since (str): Show only containers created since the container
+ with the given Id or Name, including non-running ones
+ before (str): Show only containers created before the container
+ with the given Id or Name, including non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ filters (dict): Filters to be processed on the container list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ sparse (bool): Do not inspect containers. Returns partial
+ information, but guaranteed not to block. Use
+ :py:meth:`Container.reload` on resulting objects to retrieve
+ all attributes. Default: ``False``
+ ignore_removed (bool): Ignore failures due to missing containers
+ when attempting to inspect containers from the original list.
+ Set to ``True`` if race conditions are likely. Has no effect
+ if ``sparse=True``. Default: ``False``
+
+ Returns:
+ (list of :py:class:`Container`)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
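+
+ Example (an illustrative sketch):
+
+ >>> client.containers.list(all=True, filters={'status': 'exited'})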
+ """
+ resp = self.client.api.containers(all=all, before=before,
+ filters=filters, limit=limit,
+ since=since)
+ if sparse:
+ return [self.prepare_model(r) for r in resp]
+ else:
+ containers = []
+ for r in resp:
+ try:
+ containers.append(self.get(r['Id']))
+ # a container may have been removed while iterating
+ except NotFound:
+ if not ignore_removed:
+ raise
+ return containers
+
+ def prune(self, filters=None):
+ return self.client.api.prune_containers(filters=filters)
+ prune.__doc__ = APIClient.prune_containers.__doc__
+
+
+# kwargs to copy straight from run to create
+RUN_CREATE_KWARGS = [
+ 'command',
+ 'detach',
+ 'domainname',
+ 'entrypoint',
+ 'environment',
+ 'healthcheck',
+ 'hostname',
+ 'image',
+ 'labels',
+ 'mac_address',
+ 'name',
+ 'network_disabled',
+ 'stdin_open',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'volume_driver',
+ 'working_dir',
+]
+
+# kwargs to copy straight from run to host_config
+RUN_HOST_CONFIG_KWARGS = [
+ 'auto_remove',
+ 'blkio_weight_device',
+ 'blkio_weight',
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'cpu_count',
+ 'cpu_percent',
+ 'cpu_period',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpuset_cpus',
+ 'cpuset_mems',
+ 'cpu_rt_period',
+ 'cpu_rt_runtime',
+ 'device_cgroup_rules',
+ 'device_read_bps',
+ 'device_read_iops',
+ 'device_write_bps',
+ 'device_write_iops',
+ 'devices',
+ 'dns_opt',
+ 'dns_search',
+ 'dns',
+ 'extra_hosts',
+ 'group_add',
+ 'init',
+ 'init_path',
+ 'ipc_mode',
+ 'isolation',
+ 'kernel_memory',
+ 'links',
+ 'log_config',
+ 'lxc_conf',
+ 'mem_limit',
+ 'mem_reservation',
+ 'mem_swappiness',
+ 'memswap_limit',
+ 'mounts',
+ 'nano_cpus',
+ 'network_mode',
+ 'oom_kill_disable',
+ 'oom_score_adj',
+ 'pid_mode',
+ 'pids_limit',
+ 'privileged',
+ 'publish_all_ports',
+ 'read_only',
+ 'restart_policy',
+ 'security_opt',
+ 'shm_size',
+ 'storage_opt',
+ 'sysctls',
+ 'tmpfs',
+ 'ulimits',
+ 'userns_mode',
+ 'version',
+ 'volumes_from',
+ 'runtime'
+]
+
+
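+# For example (illustrative): passing ``ports={'80/tcp': 8080}`` to run()
+# leads _create_container_args to produce create_kwargs with
+# 'ports' == [('80', 'tcp')] and a matching 'PortBindings' entry in the
+# generated host config.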
+def _create_container_args(kwargs):
+ """
+ Convert arguments to create() to arguments to create_container().
+ """
+ # Copy over kwargs which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_CREATE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ host_config_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_HOST_CONFIG_KWARGS:
+ host_config_kwargs[key] = kwargs.pop(key)
+
+ # Process kwargs which are split over both create and host_config
+ ports = kwargs.pop('ports', {})
+ if ports:
+ host_config_kwargs['port_bindings'] = ports
+
+ volumes = kwargs.pop('volumes', {})
+ if volumes:
+ host_config_kwargs['binds'] = volumes
+
+ network = kwargs.pop('network', None)
+ if network:
+ create_kwargs['networking_config'] = {network: None}
+ host_config_kwargs['network_mode'] = network
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error('run', kwargs)
+
+ create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
+
+ # Fill in any kwargs which need processing by create_host_config first
+ port_bindings = create_kwargs['host_config'].get('PortBindings')
+ if port_bindings:
+ # sort to make consistent for tests
+ create_kwargs['ports'] = [tuple(p.split('/', 1))
+ for p in sorted(port_bindings.keys())]
+ if volumes:
+ if isinstance(volumes, dict):
+ create_kwargs['volumes'] = [
+ v.get('bind') for v in volumes.values()
+ ]
+ else:
+ create_kwargs['volumes'] = [
+ _host_volume_from_bind(v) for v in volumes
+ ]
+ return create_kwargs
+
+
+def _host_volume_from_bind(bind):
+ drive, rest = ntpath.splitdrive(bind)
+ bits = rest.split(':', 1)
+ if len(bits) == 1 or bits[1] in ('ro', 'rw'):
+ return drive + bits[0]
+ else:
+ # Strip a trailing ':ro' or ':rw' mode suffix if present. Note that
+ # str.rstrip() strips a set of characters, not a suffix, and would
+ # also eat trailing path characters (e.g. the 'r' in '/inner').
+ path = bits[1]
+ if path.endswith(':ro') or path.endswith(':rw'):
+ path = path[:-3]
+ return path
+
+
+ExecResult = namedtuple('ExecResult', 'exit_code,output')
+""" A result of Container.exec_run with the properties ``exit_code`` and
+ ``output``. """
diff --git a/docker/models/images.py b/docker/models/images.py
new file mode 100644
index 0000000..41632c6
--- /dev/null
+++ b/docker/models/images.py
@@ -0,0 +1,447 @@
+import itertools
+import re
+
+import six
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import BuildError, ImageLoadError, InvalidArgument
+from ..utils import parse_repository_tag
+from ..utils.json_stream import json_stream
+from .resource import Collection, Model
+
+
+class Image(Model):
+ """
+ An image on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+
+ @property
+ def labels(self):
+ """
+ The labels of an image as dictionary.
+ """
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ if self.id.startswith('sha256:'):
+ return self.id[:17]
+ return self.id[:10]
+
+ @property
+ def tags(self):
+ """
+ The image's tags.
+ """
+ tags = self.attrs.get('RepoTags')
+ if tags is None:
+ tags = []
+ return [tag for tag in tags if tag != '<none>:<none>']
+
+ def history(self):
+ """
+ Show the history of an image.
+
+ Returns:
+ (list): The history of the image.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.history(self.id)
+
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ chunk_size (int): The number of bytes returned by each iteration
+ of the generator. If ``None``, data will be streamed as it is
+ received. Default: 2 MB
+
+ Returns:
+ (generator): A stream of raw archive data.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'w')
+ >>> for chunk in image:
+ >>> f.write(chunk)
+ >>> f.close()
+ """
+ return self.client.api.get_image(self.id, chunk_size)
+
+ def tag(self, repository, tag=None, **kwargs):
+ """
+ Tag this image into a repository. Similar to the ``docker tag``
+ command.
+
+ Args:
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ (bool): ``True`` if successful
+ """
+ return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+
+
+class RegistryData(Model):
+ """
+ Image metadata stored on the registry, including available platforms.
+ """
+ def __init__(self, image_name, *args, **kwargs):
+ super(RegistryData, self).__init__(*args, **kwargs)
+ self.image_name = image_name
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs['Descriptor']['digest']
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ return self.id[:17]
+
+ def pull(self, platform=None):
+ """
+ Pull the image digest.
+
+ Args:
+ platform (str): The platform to pull the image for.
+ Default: ``None``
+
+ Returns:
+ (:py:class:`Image`): A reference to the pulled image.
+ """
+ repository, _ = parse_repository_tag(self.image_name)
+ return self.collection.pull(repository, tag=self.id, platform=platform)
+
+ def has_platform(self, platform):
+ """
+ Check whether the given platform identifier is available for this
+ digest.
+
+ Args:
+ platform (str or dict): A string using the ``os[/arch[/variant]]``
+ format, or a platform dictionary.
+
+ Returns:
+ (bool): ``True`` if the platform is recognized as available,
+ ``False`` otherwise.
+
+ Raises:
+ :py:class:`docker.errors.InvalidArgument`
+ If the platform argument is not a valid descriptor.
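+
+ Example (illustrative; assumes the registry provides a
+ ``linux/amd64`` image):
+
+ >>> data = client.images.get_registry_data('busybox')
+ >>> data.has_platform('linux/amd64')
+ True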
+ """
+ if platform and not isinstance(platform, dict):
+ parts = platform.split('/')
+ if len(parts) > 3 or len(parts) < 1:
+ raise InvalidArgument(
+ '"{0}" is not a valid platform descriptor'.format(platform)
+ )
+ platform = {'os': parts[0]}
+ if len(parts) > 2:
+ platform['variant'] = parts[2]
+ if len(parts) > 1:
+ platform['architecture'] = parts[1]
+ return normalize_platform(
+ platform, self.client.version()
+ ) in self.attrs['Platforms']
+
+ def reload(self):
+ self.attrs = self.client.api.inspect_distribution(self.image_name)
+
+ reload.__doc__ = Model.reload.__doc__
+
+
+class ImageCollection(Collection):
+ model = Image
+
+ def build(self, **kwargs):
+ """
+ Build an image and return it. Similar to the ``docker build``
+ command. Either ``path`` or ``fileobj`` must be set.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is compressed
+ also, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ If you want to get the raw output of the build, use the
+ :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
+ low-level API.
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of ``False`` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (list): A list of images used for build cache
+ resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+ squash (bool): Squash the resulting image's layers into a
+ single layer.
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ platform (str): Platform in the format ``os[/arch[/variant]]``.
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+
+ Returns:
+ (tuple): The first item is the :py:class:`Image` object for the
+ image that was built. The second item is a generator of the
+ build logs as JSON-decoded objects.
+
+ Raises:
+ :py:class:`docker.errors.BuildError`
+ If there is an error during the build.
+ :py:class:`docker.errors.APIError`
+ If the server returns any other error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
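+
+ Example (a minimal sketch; ``./app`` is a hypothetical build
+ context directory containing a Dockerfile):
+
+ >>> image, logs = client.images.build(
+ path='./app', tag='myapp:latest', rm=True
+ )
+ >>> for line in logs:
+ ... print(line.get('stream', ''))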
+ """
+ resp = self.client.api.build(**kwargs)
+ if isinstance(resp, six.string_types):
+ return self.get(resp)
+ last_event = None
+ image_id = None
+ result_stream, internal_stream = itertools.tee(json_stream(resp))
+ for chunk in internal_stream:
+ if 'error' in chunk:
+ raise BuildError(chunk['error'], result_stream)
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Successfully built |sha256:)([0-9a-f]+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ last_event = chunk
+ if image_id:
+ return (self.get(image_id), result_stream)
+ raise BuildError(last_event or 'Unknown', result_stream)
+
+ def get(self, name):
+ """
+ Gets an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`Image`): The image.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_image(name))
+
+ def get_registry_data(self, name):
+ """
+ Gets the registry data for an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`RegistryData`): The data object.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return RegistryData(
+ image_name=name,
+ attrs=self.client.api.inspect_distribution(name),
+ client=self.client,
+ collection=self,
+ )
+
+ def list(self, name=None, all=False, filters=None):
+ """
+ List images on the server.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.images(name=name, all=all, filters=filters)
+ return [self.get(r["Id"]) for r in resp]
+
+ def load(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
+ Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.load_image(data)
+ images = []
+ for chunk in resp:
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Loaded image ID: |^Loaded image: )(.+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ images.append(image_id)
+ if 'error' in chunk:
+ raise ImageLoadError(chunk['error'])
+
+ return [self.get(i) for i in images]
+
+ def pull(self, repository, tag=None, **kwargs):
+ """
+ Pull an image of the given name and return it. Similar to the
+ ``docker pull`` command.
+ If no tag is specified, all tags from that repository will be
+ pulled.
+
+ If you want to get the raw pull output, use the
+ :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
+ low-level API.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.client.DockerClient.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+
+ Returns:
+ (:py:class:`Image` or list): The image that has been pulled.
+ If no ``tag`` was specified, the method will return a list
+ of :py:class:`Image` objects belonging to this repository.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> # Pull the image tagged `latest` in the busybox repo
+ >>> image = client.images.pull('busybox:latest')
+
+ >>> # Pull all tags in the busybox repo
+ >>> images = client.images.pull('busybox')
+ """
+ if not tag:
+ repository, tag = parse_repository_tag(repository)
+
+ self.client.api.pull(repository, tag=tag, **kwargs)
+ if tag:
+ return self.get('{0}{2}{1}'.format(
+ repository, tag, '@' if tag.startswith('sha256:') else ':'
+ ))
+ return self.list(repository)
+
+ def push(self, repository, tag=None, **kwargs):
+ return self.client.api.push(repository, tag=tag, **kwargs)
+ push.__doc__ = APIClient.push.__doc__
+
+ def remove(self, *args, **kwargs):
+ self.client.api.remove_image(*args, **kwargs)
+ remove.__doc__ = APIClient.remove_image.__doc__
+
+ def search(self, *args, **kwargs):
+ return self.client.api.search(*args, **kwargs)
+ search.__doc__ = APIClient.search.__doc__
+
+ def prune(self, filters=None):
+ return self.client.api.prune_images(filters=filters)
+ prune.__doc__ = APIClient.prune_images.__doc__
+
+ def prune_builds(self, *args, **kwargs):
+ return self.client.api.prune_builds(*args, **kwargs)
+ prune_builds.__doc__ = APIClient.prune_builds.__doc__
+
+
+def normalize_platform(platform, engine_info):
+ if platform is None:
+ platform = {}
+ if 'os' not in platform:
+ platform['os'] = engine_info['Os']
+ if 'architecture' not in platform:
+ platform['architecture'] = engine_info['Arch']
+ return platform
diff --git a/docker/models/networks.py b/docker/models/networks.py
new file mode 100644
index 0000000..be3291a
--- /dev/null
+++ b/docker/models/networks.py
@@ -0,0 +1,215 @@
+from ..api import APIClient
+from ..utils import version_gte
+from .containers import Container
+from .resource import Model, Collection
+
+
+class Network(Model):
+ """
+ A Docker network.
+ """
+ @property
+ def name(self):
+ """
+ The name of the network.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def containers(self):
+ """
+ The containers that are connected to the network, as a list of
+ :py:class:`~docker.models.containers.Container` objects.
+ """
+ return [
+ self.client.containers.get(cid) for cid in
+ (self.attrs.get('Containers') or {}).keys()
+ ]
+
+ def connect(self, container, *args, **kwargs):
+ """
+ Connect a container to this network.
+
+ Args:
+ container (str): Container to connect to this network, as either
+ an ID, name, or :py:class:`~docker.models.containers.Container`
+ object.
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
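+
+ Example (illustrative names):
+
+ >>> network = client.networks.get('mynet')
+ >>> network.connect('web', aliases=['webserver'])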
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.connect_container_to_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def disconnect(self, container, *args, **kwargs):
+ """
+ Disconnect a container from this network.
+
+ Args:
+ container (str): Container to disconnect from this network, as
+ either an ID, name, or
+ :py:class:`~docker.models.containers.Container` object.
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.disconnect_container_from_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def remove(self):
+ """
+ Remove this network.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_network(self.id)
+
+
+class NetworkCollection(Collection):
+ """
+ Networks on the Docker server.
+ """
+ model = Network
+
+ def create(self, name, *args, **kwargs):
+ """
+ Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+ check_duplicate (bool): Request the daemon to check for networks
+ with the same name. Default: ``None``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+ scope (str): Specify the network's scope (``local``, ``global`` or
+ ``swarm``)
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (:py:class:`Network`): The network that was created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.networks.create("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> client.networks.create(
+ "network1",
+ driver="bridge",
+ ipam=ipam_config
+ )
+
+ """
+ resp = self.client.api.create_network(name, *args, **kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, network_id, *args, **kwargs):
+ """
+ Get a network by its ID.
+
+ Args:
+ network_id (str): The ID of the network.
+ verbose (bool): Retrieve the service details across the cluster in
+ swarm mode.
+ scope (str): Filter the network by scope (``swarm``, ``global``
+ or ``local``).
+
+ Returns:
+ (:py:class:`Network`) The network.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the network does not exist.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ return self.prepare_model(
+ self.client.api.inspect_network(network_id, *args, **kwargs)
+ )
+
+ def list(self, *args, **kwargs):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by.
+ ids (:py:class:`list`): List of ids to filter by.
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+ greedy (bool): Fetch more details for each network individually.
+ For example, this is needed to populate the containers
+ attached to each network.
+
+ Returns:
+ (list of :py:class:`Network`) The networks on the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ greedy = kwargs.pop('greedy', False)
+ resp = self.client.api.networks(*args, **kwargs)
+ networks = [self.prepare_model(item) for item in resp]
+ if greedy and version_gte(self.client.api._version, '1.28'):
+ for net in networks:
+ net.reload()
+ return networks
+
+ def prune(self, filters=None):
+ return self.client.api.prune_networks(filters=filters)
+ prune.__doc__ = APIClient.prune_networks.__doc__
diff --git a/docker/models/nodes.py b/docker/models/nodes.py
new file mode 100644
index 0000000..8dd9350
--- /dev/null
+++ b/docker/models/nodes.py
@@ -0,0 +1,107 @@
+from .resource import Model, Collection
+
+
+class Node(Model):
+ """A node in a swarm."""
+ id_attribute = 'ID'
+
+ @property
+ def version(self):
+ """
+ The version number of the node. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def update(self, node_spec):
+ """
+ Update the node's configuration.
+
+ Args:
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed.
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> node.update(node_spec)
+
+ """
+ return self.client.api.update_node(self.id, self.version, node_spec)
+
+ def remove(self, force=False):
+ """
+ Remove this node from the swarm.
+
+ Args:
+ force (bool): Force remove an active node. Default: `False`
+
+ Returns:
+ `True` if the request was successful.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_node(self.id, force=force)
+
+
+class NodeCollection(Collection):
+ """Nodes on the Docker server."""
+ model = Node
+
+ def get(self, node_id):
+ """
+ Get a node.
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A :py:class:`Node` object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_node(node_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of :py:class:`Node` objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.nodes.list(filters={'role': 'manager'})
+ """
+ return [
+ self.prepare_model(n)
+ for n in self.client.api.nodes(*args, **kwargs)
+ ]
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
new file mode 100644
index 0000000..0688018
--- /dev/null
+++ b/docker/models/plugins.py
@@ -0,0 +1,200 @@
+from .. import errors
+from .resource import Collection, Model
+
+
+class Plugin(Model):
+ """
+ A plugin on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ """
+ The plugin's name.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def enabled(self):
+ """
+ Whether the plugin is enabled.
+ """
+ return self.attrs.get('Enabled')
+
+ @property
+ def settings(self):
+ """
+ A dictionary representing the plugin's configuration.
+ """
+ return self.attrs.get('Settings')
+
+ def configure(self, options):
+ """
+ Update the plugin's settings.
+
+ Args:
+ options (dict): A key-value mapping of options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.configure_plugin(self.name, options)
+ self.reload()
+
+ def disable(self):
+ """
+ Disable the plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ self.client.api.disable_plugin(self.name)
+ self.reload()
+
+ def enable(self, timeout=0):
+ """
+ Enable the plugin.
+
+ Args:
+ timeout (int): Timeout in seconds. Default: 0
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.enable_plugin(self.name, timeout)
+ self.reload()
+
+ def push(self):
+ """
+ Push the plugin to a remote registry.
+
+ Returns:
+ A dict iterator streaming the status of the upload.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.push_plugin(self.name)
+
+ def remove(self, force=False):
+ """
+ Remove the plugin from the server.
+
+ Args:
+ force (bool): Remove even if the plugin is enabled.
+ Default: False
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_plugin(self.name, force=force)
+
+ def upgrade(self, remote=None):
+ """
+ Upgrade the plugin.
+
+ Args:
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ Default: this plugin's name.
+
+ Returns:
+ A generator streaming the decoded API logs.
+ """
+ if self.enabled:
+ raise errors.DockerException(
+ 'Plugin must be disabled before upgrading.'
+ )
+
+ if remote is None:
+ remote = self.name
+ privileges = self.client.api.plugin_privileges(remote)
+ for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
+ yield d
+ self.reload()
+
+
+class PluginCollection(Collection):
+ model = Plugin
+
+ def create(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ (:py:class:`Plugin`): The newly created plugin.
+ """
+ self.client.api.create_plugin(name, plugin_data_dir, gzip)
+ return self.get(name)
+
+ def get(self, name):
+ """
+ Gets a plugin.
+
+ Args:
+ name (str): The name of the plugin.
+
+ Returns:
+ (:py:class:`Plugin`): The plugin.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the plugin does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_plugin(name))
+
+ def install(self, remote_name, local_name=None):
+ """
+ Pull and install a plugin.
+
+ Args:
+ remote_name (string): Remote reference for the plugin to
+ install. The ``:latest`` tag is optional, and is the
+ default if omitted.
+ local_name (string): Local name for the pulled plugin.
+ The ``:latest`` tag is optional, and is the default if
+ omitted. Optional.
+
+ Returns:
+ (:py:class:`Plugin`): The installed plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
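+
+ Example (illustrative plugin reference):
+
+ >>> plugin = client.plugins.install('vieux/sshfs:latest')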
+ """
+ privileges = self.client.api.plugin_privileges(remote_name)
+ it = self.client.api.pull_plugin(remote_name, privileges, local_name)
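+ # Exhaust the pull progress stream before fetching the installed plugin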
+ for data in it:
+ pass
+ return self.get(local_name or remote_name)
+
+ def list(self):
+ """
+ List plugins installed on the server.
+
+ Returns:
+ (list of :py:class:`Plugin`): The plugins.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.plugins()
+ return [self.prepare_model(r) for r in resp]
diff --git a/docker/models/resource.py b/docker/models/resource.py
new file mode 100644
index 0000000..ed3900a
--- /dev/null
+++ b/docker/models/resource.py
@@ -0,0 +1,93 @@
+
+class Model(object):
+ """
+ A base class for representing a single object on the server.
+ """
+ id_attribute = 'Id'
+
+ def __init__(self, attrs=None, client=None, collection=None):
+ #: A client pointing at the server that this object is on.
+ self.client = client
+
+ #: The collection that this model is part of.
+ self.collection = collection
+
+ #: The raw representation of this object from the API
+ self.attrs = attrs
+ if self.attrs is None:
+ self.attrs = {}
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.id == other.id
+
+ def __hash__(self):
+ return hash("%s:%s" % (self.__class__.__name__, self.id))
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs.get(self.id_attribute)
+
+ @property
+ def short_id(self):
+ """
+ The ID of the object, truncated to 10 characters.
+ """
+ return self.id[:10]
+
+ def reload(self):
+ """
+ Load this object from the server again and update ``attrs`` with the
+ new data.
+ """
+ new_model = self.collection.get(self.id)
+ self.attrs = new_model.attrs
+
+
+class Collection(object):
+ """
+ A base class for representing all objects of a particular type on the
+ server.
+ """
+
+ #: The type of object this collection represents, set by subclasses
+ model = None
+
+ def __init__(self, client=None):
+ #: The client pointing at the server that this collection of objects
+ #: is on.
+ self.client = client
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError(
+ "'{}' object is not callable. You might be trying to use the old "
+ "(pre-2.0) API - use docker.APIClient if so."
+ .format(self.__class__.__name__))
+
+ def list(self):
+ raise NotImplementedError
+
+ def get(self, key):
+ raise NotImplementedError
+
+ def create(self, attrs=None):
+ raise NotImplementedError
+
+ def prepare_model(self, attrs):
+ """
+ Create a model from a set of attributes.
+ """
+ if isinstance(attrs, Model):
+ attrs.client = self.client
+ attrs.collection = self
+ return attrs
+ elif isinstance(attrs, dict):
+ return self.model(attrs=attrs, client=self.client, collection=self)
+ else:
+ raise Exception("Can't create %s from %s" %
+ (self.model.__name__, attrs))
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
new file mode 100644
index 0000000..ca11ede
--- /dev/null
+++ b/docker/models/secrets.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Secret(Model):
+ """A secret."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this secret.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the secret could not be removed.
+ """
+ return self.client.api.remove_secret(self.id)
+
+
+class SecretCollection(Collection):
+ """Secrets on the Docker server."""
+ model = Secret
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_secret(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_secret.__doc__
+
+ def get(self, secret_id):
+ """
+ Get a secret.
+
+ Args:
+ secret_id (str): Secret ID.
+
+ Returns:
+ (:py:class:`Secret`): The secret.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the secret does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_secret(secret_id))
+
+ def list(self, **kwargs):
+ """
+ List secrets. Similar to the ``docker secret ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Secret`): The secrets.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.secrets(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/services.py b/docker/models/services.py
new file mode 100644
index 0000000..458d2c8
--- /dev/null
+++ b/docker/models/services.py
@@ -0,0 +1,352 @@
+import copy
+from docker.errors import create_unexpected_kwargs_error, InvalidArgument
+from docker.types import TaskTemplate, ContainerSpec, ServiceMode
+from .resource import Model, Collection
+
+
+class Service(Model):
+ """A service."""
+ id_attribute = 'ID'
+
+ @property
+ def name(self):
+ """The service's name."""
+ return self.attrs['Spec']['Name']
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def remove(self):
+ """
+ Stop and remove the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_service(self.id)
+
+ def tasks(self, filters=None):
+ """
+ List the tasks in this service.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``node``,
+ ``label``, and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if filters is None:
+ filters = {}
+ filters['service'] = self.id
+ return self.client.api.tasks(filters=filters)
+
+ def update(self, **kwargs):
+ """
+ Update a service's configuration. Similar to the ``docker service
+ update`` command.
+
+ Takes the same parameters as :py:meth:`~ServiceCollection.create`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ # Image is required, so if it hasn't been set, use current image
+ if 'image' not in kwargs:
+ spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ kwargs['image'] = spec['Image']
+
+ if kwargs.get('force_update') is True:
+ task_template = self.attrs['Spec']['TaskTemplate']
+ current_value = int(task_template.get('ForceUpdate', 0))
+ kwargs['force_update'] = current_value + 1
+
+ create_kwargs = _get_create_service_kwargs('update', kwargs)
+
+ return self.client.api.update_service(
+ self.id,
+ self.version,
+ **create_kwargs
+ )
+
+ def logs(self, **kwargs):
+ """
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ (generator): Logs for the service.
+ """
+ is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
+ 'TTY', False
+ )
+ return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
+
+ def scale(self, replicas):
+ """
+ Scale service container.
+
+ Args:
+ replicas (int): The number of containers that should be running.
+
+ Returns:
+ ``True`` if successful.
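+
+ Example (illustrative service name):
+
+ >>> service = client.services.get('web')
+ >>> service.scale(5)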
+ """
+
+ if 'Global' in self.attrs['Spec']['Mode'].keys():
+ raise InvalidArgument('Cannot scale a global service')
+
+ service_mode = ServiceMode('replicated', replicas)
+ return self.client.api.update_service(self.id, self.version,
+ mode=service_mode,
+ fetch_current_spec=True)
+
+ def force_update(self):
+ """
+ Force update the service even if no changes require it.
+
+ Returns:
+ ``True`` if successful.
+ """
+
+ return self.update(force_update=True, fetch_current_spec=True)
+
+
+class ServiceCollection(Collection):
+ """Services on the Docker server."""
+ model = Service
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a service. Similar to the ``docker service create`` command.
+
+ Args:
+ image (str): The image name to use for the containers.
+ command (list of str or str): Command to run.
+ args (list of str): Arguments to the command.
+ constraints (list of str): Placement constraints.
+ container_labels (dict): Labels to apply to the container.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+ env (list of str): Environment variables, in the form
+ ``KEY=val``.
+ hostname (string): Hostname to set on the container.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
+ labels (dict): Labels to apply to the service.
+ log_driver (str): Log driver to use for containers.
+ log_driver_options (dict): Log driver options.
+ mode (ServiceMode): Scheduling mode for the service.
+ Default: ``None``
+ mounts (list of str): Mounts for the containers, in the form
+ ``source:target:options``, where options is either
+ ``ro`` or ``rw``.
+ name (str): Name to give to the service.
+ networks (list of str): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ resources (Resources): Resource limits and reservations.
+ restart_policy (RestartPolicy): Restart policy for containers.
+ secrets (list of :py:class:`docker.types.SecretReference`): List
+ of secrets accessible to containers for this service.
+ stop_grace_period (int): Amount of time to wait for
+ containers to terminate before forcefully killing them.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ user (str): User to run commands as.
+ workdir (str): Working directory for commands to run.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+ open_stdin (boolean): Open ``stdin``
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's `hosts` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference`
+ that will be exposed to the service.
+ privileges (Privileges): Security options for the service's
+ containers.
+
+ Returns:
+ (:py:class:`Service`) The created service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
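+
+ Example (a minimal sketch; the image and name are illustrative):
+
+ >>> service = client.services.create(
+ 'nginx:alpine',
+ name='web',
+ mode=docker.types.ServiceMode('replicated', replicas=2)
+ )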
+ """
+ kwargs['image'] = image
+ kwargs['command'] = command
+ create_kwargs = _get_create_service_kwargs('create', kwargs)
+ service_id = self.client.api.create_service(**create_kwargs)
+ return self.get(service_id)
+
+ def get(self, service_id, insert_defaults=None):
+ """
+ Get a service.
+
+ Args:
+ service_id (str): The ID of the service.
+ insert_defaults (boolean): If true, default values will be merged
+ into the output.
+
+ Returns:
+ (:py:class:`Service`): The service.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the service does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.InvalidVersion`
+ If one of the arguments is not supported with the current
+ API version.
+ """
+ return self.prepare_model(
+ self.client.api.inspect_service(service_id, insert_defaults)
+ )
+
+ def list(self, **kwargs):
+ """
+ List services.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``label`` and ``mode``.
+ Default: ``None``.
+
+ Returns:
+ (list of :py:class:`Service`): The services.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return [
+ self.prepare_model(s)
+ for s in self.client.api.services(**kwargs)
+ ]
+
+
+# kwargs to copy straight over to ContainerSpec
+CONTAINER_SPEC_KWARGS = [
+ 'args',
+ 'command',
+ 'configs',
+ 'dns_config',
+ 'env',
+ 'groups',
+ 'healthcheck',
+ 'hostname',
+ 'hosts',
+ 'image',
+ 'isolation',
+ 'labels',
+ 'mounts',
+ 'open_stdin',
+ 'privileges',
+ 'read_only',
+ 'secrets',
+ 'stop_grace_period',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'workdir',
+]
+
+# kwargs to copy straight over to TaskTemplate
+TASK_TEMPLATE_KWARGS = [
+ 'networks',
+ 'resources',
+ 'restart_policy',
+]
+
+# kwargs to copy straight over to create_service
+CREATE_SERVICE_KWARGS = [
+ 'name',
+ 'labels',
+ 'mode',
+ 'update_config',
+ 'endpoint_spec',
+]
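+
+# For example (illustrative), in a call like
+# client.services.create('nginx', name='web', env=['A=1'],
+# resources=resources)
+# 'name' goes straight to create_service, 'env' ends up in the
+# ContainerSpec, and 'resources' in the TaskTemplate.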
+
+
+def _get_create_service_kwargs(func_name, kwargs):
+ # Copy over things which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CREATE_SERVICE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ container_spec_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CONTAINER_SPEC_KWARGS:
+ container_spec_kwargs[key] = kwargs.pop(key)
+ task_template_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in TASK_TEMPLATE_KWARGS:
+ task_template_kwargs[key] = kwargs.pop(key)
+
+ if 'container_labels' in kwargs:
+ container_spec_kwargs['labels'] = kwargs.pop('container_labels')
+
+ if 'constraints' in kwargs:
+ task_template_kwargs['placement'] = {
+ 'Constraints': kwargs.pop('constraints')
+ }
+
+ if 'log_driver' in kwargs:
+ task_template_kwargs['log_driver'] = {
+ 'Name': kwargs.pop('log_driver'),
+ 'Options': kwargs.pop('log_driver_options', {})
+ }
+
+ if func_name == 'update':
+ if 'force_update' in kwargs:
+ task_template_kwargs['force_update'] = kwargs.pop('force_update')
+
+ # fetch the current spec by default if updating the service
+ # through the model
+ fetch_current_spec = kwargs.pop('fetch_current_spec', True)
+ create_kwargs['fetch_current_spec'] = fetch_current_spec
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error(func_name, kwargs)
+
+ container_spec = ContainerSpec(**container_spec_kwargs)
+ task_template_kwargs['container_spec'] = container_spec
+ create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
+ return create_kwargs
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
new file mode 100644
index 0000000..7396e73
--- /dev/null
+++ b/docker/models/swarm.py
@@ -0,0 +1,168 @@
+from docker.api import APIClient
+from docker.errors import APIError
+from .resource import Model
+
+
+class Swarm(Model):
+ """
+ The server's Swarm state. This is a singleton that must be reloaded to get
+ the current state of the Swarm.
+ """
+ id_attribute = 'ID'
+
+ def __init__(self, *args, **kwargs):
+ super(Swarm, self).__init__(*args, **kwargs)
+ if self.client:
+ try:
+ self.reload()
+ except APIError as e:
+ # FIXME: https://github.com/docker/docker/issues/29192
+ if e.response.status_code not in (406, 503):
+ raise
+
+ @property
+ def version(self):
+ """
+ The version number of the swarm. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def get_unlock_key(self):
+ return self.client.api.get_unlock_key()
+ get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
+
+ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, **kwargs):
+ """
+ Initialize a new swarm on this Engine.
+
+ Args:
+ advertise_addr (str): Externally reachable address advertised to
+ other nodes. This can either be an address/port combination in
+ the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used.
+
+ If not specified, it will be automatically detected when
+ possible.
+ listen_addr (str): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: ``0.0.0.0:2377``
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ task_history_retention_limit (int): Maximum number of tasks
+ history stored.
+ snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Number of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Number of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for node certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+ labels (dict): User-defined key/value metadata.
+ signing_ca_cert (str): The desired signing CA certificate for all
+ swarm node TLS leaf certificates, in PEM format.
+ signing_ca_key (str): The desired signing CA key for all swarm
+ node TLS leaf certificates, in PEM format.
+ ca_force_rotate (int): An integer used to force swarm to
+ generate a new signing CA certificate and key, if none have
+ been specified.
+ autolock_managers (boolean): If set, generate a key and use it to
+ lock data stored on the managers.
+ log_driver (DriverConfig): The default log driver to use for tasks
+ created in the orchestrator.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.swarm.init(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, snapshot_interval=5000,
+ log_entries_for_slow_followers=1200
+ )
+
+ """
+ init_kwargs = {
+ 'advertise_addr': advertise_addr,
+ 'listen_addr': listen_addr,
+ 'force_new_cluster': force_new_cluster
+ }
+ init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
+ self.client.api.init_swarm(**init_kwargs)
+ self.reload()
+
+ def join(self, *args, **kwargs):
+ return self.client.api.join_swarm(*args, **kwargs)
+ join.__doc__ = APIClient.join_swarm.__doc__
+
+ def leave(self, *args, **kwargs):
+ return self.client.api.leave_swarm(*args, **kwargs)
+ leave.__doc__ = APIClient.leave_swarm.__doc__
+
+ def reload(self):
+ """
+ Inspect the swarm on the server and store the response in
+ :py:attr:`attrs`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.attrs = self.client.api.inspect_swarm()
+
+ def unlock(self, key):
+ return self.client.api.unlock_swarm(key)
+ unlock.__doc__ = APIClient.unlock_swarm.__doc__
+
+ def update(self, rotate_worker_token=False, rotate_manager_token=False,
+ **kwargs):
+ """
+ Update the swarm's configuration.
+
+ It takes the same arguments as :py:meth:`init`, except
+ ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
+ addition, it takes these arguments:
+
+ Args:
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
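+ Example (illustrative):
+
+ >>> client.swarm.update(rotate_worker_token=True)
+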
+ """
+ # node_cert_expiry apparently has to be set; default to 90 days,
+ # expressed in nanoseconds
+ if kwargs.get('node_cert_expiry') is None:
+ kwargs['node_cert_expiry'] = 7776000000000000
+
+ return self.client.api.update_swarm(
+ version=self.version,
+ swarm_spec=self.client.api.create_swarm_spec(**kwargs),
+ rotate_worker_token=rotate_worker_token,
+ rotate_manager_token=rotate_manager_token
+ )
diff --git a/docker/models/volumes.py b/docker/models/volumes.py
new file mode 100644
index 0000000..3c2e837
--- /dev/null
+++ b/docker/models/volumes.py
@@ -0,0 +1,99 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Volume(Model):
+ """A volume."""
+ id_attribute = 'Name'
+
+ @property
+ def name(self):
+ """The name of the volume."""
+ return self.attrs['Name']
+
+ def remove(self, force=False):
+ """
+ Remove this volume.
+
+ Args:
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
+ """
+ return self.client.api.remove_volume(self.id, force=force)
+
+
+class VolumeCollection(Collection):
+ """Volumes on the Docker server."""
+ model = Volume
+
+ def create(self, name=None, **kwargs):
+ """
+ Create a volume.
+
+ Args:
+ name (str): Name of the volume. If not specified, the engine
+ generates a name.
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (:py:class:`Volume`): The volume created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = client.volumes.create(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+
+ """
+ obj = self.client.api.create_volume(name, **kwargs)
+ return self.prepare_model(obj)
+
+ def get(self, volume_id):
+ """
+ Get a volume.
+
+ Args:
+ volume_id (str): Volume name.
+
+ Returns:
+ (:py:class:`Volume`): The volume.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the volume does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_volume(volume_id))
+
+ def list(self, **kwargs):
+ """
+ List volumes. Similar to the ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Volume`): The volumes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.volumes(**kwargs)
+ if not resp.get('Volumes'):
+ return []
+ return [self.prepare_model(obj) for obj in resp['Volumes']]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_volumes(filters=filters)
+ prune.__doc__ = APIClient.prune_volumes.__doc__
diff --git a/docker/tls.py b/docker/tls.py
new file mode 100644
index 0000000..4900e9f
--- /dev/null
+++ b/docker/tls.py
@@ -0,0 +1,112 @@
+import os
+import ssl
+
+from . import errors
+from .transport import SSLAdapter
+
+
+class TLSConfig(object):
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be ``False`` or a path to a CA cert
+ file.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
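+
+ Example (a minimal sketch with hypothetical paths):
+
+ >>> tls_config = docker.tls.TLSConfig(
+ client_cert=('/certs/cert.pem', '/certs/key.pem'),
+ ca_cert='/certs/ca.pem',
+ verify=True
+ )
+ >>> client = docker.DockerClient(
+ base_url='tcp://127.0.0.1:2376', tls=tls_config
+ )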
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
+ cert = None
+ ca_cert = None
+ verify = None
+ ssl_version = None
+
+ def __init__(self, client_cert=None, ca_cert=None, verify=None,
+ ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None):
+ # Argument compatibility/mapping with
+ # https://docs.docker.com/engine/articles/https/
+ # This diverges from the Docker CLI in that users can specify 'tls'
+ # here, but also disable any public/default CA pool verification by
+ # leaving tls_verify=False
+
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ # TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is
+ # deprecated, and it's recommended to use OP_NO_TLSvWhatever instead
+ # to exclude versions. But I think that might require a bigger
+ # architectural change, so I've opted not to pursue it at this time
+
+ # If the user provides an SSL version, we should use their preference
+ if ssl_version:
+ self.ssl_version = ssl_version
+ else:
+ # If the user provides no ssl version, we should default to
+ # TLSv1_2. This option is the most secure, and will work for the
+ # majority of users with reasonably up-to-date software. However,
+ # before doing so, detect openssl version to ensure we can support
+ # it.
+ if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
+ ssl, 'PROTOCOL_TLSv1_2'):
+ # If the OpenSSL version is high enough to support TLSv1_2,
+ # then we should use it.
+ self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
+ else:
+ # Otherwise, TLS v1.0 seems to be the safest default;
+ # SSLv23 fails in mysterious ways:
+ # https://github.com/docker/docker-py/issues/963
+ self.ssl_version = ssl.PROTOCOL_TLSv1
+
+ # "tls" and "tls_verify" must have both or neither cert/key files In
+ # either case, Alert the user when both are expected, but any are
+ # missing.
+
+ if client_cert:
+ try:
+ tls_cert, tls_key = client_cert
+ except ValueError:
+ raise errors.TLSParameterError(
+ 'client_cert must be a tuple of'
+ ' (client certificate, key file)'
+ )
+
+ if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
+ not os.path.isfile(tls_key)):
+ raise errors.TLSParameterError(
+ 'Paths to the certificate and key files must be'
+ ' provided through the client_cert param'
+ )
+ self.cert = (tls_cert, tls_key)
+
+ # If verify is set, make sure the cert exists
+ self.verify = verify
+ self.ca_cert = ca_cert
+ if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
+ raise errors.TLSParameterError(
+ 'Invalid CA certificate provided for `tls_ca_cert`.'
+ )
+
+ def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
+ client.ssl_version = self.ssl_version
+
+ if self.verify and self.ca_cert:
+ client.verify = self.ca_cert
+ else:
+ client.verify = self.verify
+
+ if self.cert:
+ client.cert = self.cert
+
+ client.mount('https://', SSLAdapter(
+ ssl_version=self.ssl_version,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
+ ))
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
new file mode 100644
index 0000000..abbee18
--- /dev/null
+++ b/docker/transport/__init__.py
@@ -0,0 +1,8 @@
+# flake8: noqa
+from .unixconn import UnixAdapter
+from .ssladapter import SSLAdapter
+try:
+ from .npipeconn import NpipeAdapter
+ from .npipesocket import NpipeSocket
+except ImportError:
+ pass
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
new file mode 100644
index 0000000..ab9b904
--- /dev/null
+++ b/docker/transport/npipeconn.py
@@ -0,0 +1,108 @@
+import six
+import requests.adapters
+
+from .. import constants
+from .npipesocket import NpipeSocket
+
+if six.PY3:
+ import http.client as httplib
+else:
+ import httplib
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class NpipeHTTPConnection(httplib.HTTPConnection, object):
+ def __init__(self, npipe_path, timeout=60):
+ super(NpipeHTTPConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def connect(self):
+ sock = NpipeSocket()
+ sock.settimeout(self.timeout)
+ sock.connect(self.npipe_path)
+ self.sock = sock
+
+
+class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, npipe_path, timeout=60, maxsize=10):
+ super(NpipeHTTPConnectionPool, self).__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return NpipeHTTPConnection(
+ self.npipe_path, self.timeout
+ )
+
+ # When re-using connections, urllib3 tries to call select() on our
+ # NpipeSocket instance, causing a crash. To circumvent this, we override
+ # _get_conn, where that check happens.
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+
+ except six.moves.queue.Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ )
+ pass # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class NpipeAdapter(requests.adapters.HTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
+ 'pools',
+ 'timeout']
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS):
+ self.npipe_path = base_url.replace('npipe://', '')
+ self.timeout = timeout
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super(NpipeAdapter, self).__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = NpipeHTTPConnectionPool(
+ self.npipe_path, self.timeout
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, as is the case when using a named pipe.
+ # Since proxies are an irrelevant notion in the case of named pipes
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-sdk-python/issues/811
+ return request.path_url
+
+ def close(self):
+ self.pools.clear()
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
new file mode 100644
index 0000000..c04b39d
--- /dev/null
+++ b/docker/transport/npipesocket.py
@@ -0,0 +1,219 @@
+import functools
+import io
+
+import six
+import win32file
+import win32pipe
+
+cERROR_PIPE_BUSY = 0xe7
+cSECURITY_SQOS_PRESENT = 0x100000
+cSECURITY_ANONYMOUS = 0
+
+RETRY_WAIT_TIMEOUT = 10000
+
+
+def check_closed(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if self._closed:
+ raise RuntimeError(
+ 'Can not reuse socket after connection was closed.'
+ )
+ return f(self, *args, **kwargs)
+ return wrapped
+
+
+class NpipeSocket(object):
+ """ Partial implementation of the socket API over windows named pipes.
+ This implementation is only designed to be used as a client socket,
+ and server-specific methods (bind, listen, accept...) are not
+ implemented.
+ """
+
+ def __init__(self, handle=None):
+ self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
+ self._handle = handle
+ self._closed = False
+
+ def accept(self):
+ raise NotImplementedError()
+
+ def bind(self, address):
+ raise NotImplementedError()
+
+ def close(self):
+ self._handle.Close()
+ self._closed = True
+
+ @check_closed
+ def connect(self, address):
+ win32pipe.WaitNamedPipe(address, self._timeout)
+ try:
+ handle = win32file.CreateFile(
+ address,
+ win32file.GENERIC_READ | win32file.GENERIC_WRITE,
+ 0,
+ None,
+ win32file.OPEN_EXISTING,
+ cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
+ 0
+ )
+ except win32pipe.error as e:
+ # See Remarks:
+ # https://msdn.microsoft.com/en-us/library/aa365800.aspx
+ if e.winerror == cERROR_PIPE_BUSY:
+ # Another program or thread has grabbed our pipe instance
+ # before we got to it. Wait for availability and attempt to
+ # connect again.
+ win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
+ return self.connect(address)
+ raise e
+
+ self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
+
+ self._handle = handle
+ self._address = address
+
+ @check_closed
+ def connect_ex(self, address):
+ return self.connect(address)
+
+ @check_closed
+ def detach(self):
+ self._closed = True
+ return self._handle
+
+ @check_closed
+ def dup(self):
+ return NpipeSocket(self._handle)
+
+ @check_closed
+ def fileno(self):
+ return int(self._handle)
+
+ def getpeername(self):
+ return self._address
+
+ def getsockname(self):
+ return self._address
+
+ def getsockopt(self, level, optname, buflen=None):
+ raise NotImplementedError()
+
+ def ioctl(self, control, option):
+ raise NotImplementedError()
+
+ def listen(self, backlog):
+ raise NotImplementedError()
+
+    def makefile(self, mode='r', bufsize=None):
+ if mode.strip('b') != 'r':
+ raise NotImplementedError()
+ rawio = NpipeFileIOBase(self)
+ if bufsize is None or bufsize <= 0:
+ bufsize = io.DEFAULT_BUFFER_SIZE
+ return io.BufferedReader(rawio, buffer_size=bufsize)
+
+ @check_closed
+ def recv(self, bufsize, flags=0):
+ err, data = win32file.ReadFile(self._handle, bufsize)
+ return data
+
+ @check_closed
+ def recvfrom(self, bufsize, flags=0):
+ data = self.recv(bufsize, flags)
+ return (data, self._address)
+
+ @check_closed
+ def recvfrom_into(self, buf, nbytes=0, flags=0):
+ return self.recv_into(buf, nbytes, flags), self._address
+
+ @check_closed
+ def recv_into(self, buf, nbytes=0):
+ if six.PY2:
+ return self._recv_into_py2(buf, nbytes)
+
+ readbuf = buf
+ if not isinstance(buf, memoryview):
+ readbuf = memoryview(buf)
+
+ err, data = win32file.ReadFile(
+ self._handle,
+ readbuf[:nbytes] if nbytes else readbuf
+ )
+ return len(data)
+
+ def _recv_into_py2(self, buf, nbytes):
+ err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
+ n = len(data)
+ buf[:n] = data
+ return n
+
+ @check_closed
+ def send(self, string, flags=0):
+ err, nbytes = win32file.WriteFile(self._handle, string)
+ return nbytes
+
+ @check_closed
+ def sendall(self, string, flags=0):
+ return self.send(string, flags)
+
+ @check_closed
+ def sendto(self, string, address):
+ self.connect(address)
+ return self.send(string)
+
+ def setblocking(self, flag):
+ if flag:
+ return self.settimeout(None)
+ return self.settimeout(0)
+
+ def settimeout(self, value):
+ if value is None:
+ # Blocking mode
+ self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
+ elif not isinstance(value, (float, int)) or value < 0:
+ raise ValueError('Timeout value out of range')
+ elif value == 0:
+ # Non-blocking mode
+ self._timeout = win32pipe.NMPWAIT_NO_WAIT
+ else:
+ # Timeout mode - Value converted to milliseconds
+            self._timeout = int(value * 1000)
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setsockopt(self, level, optname, value):
+ raise NotImplementedError()
+
+ @check_closed
+ def shutdown(self, how):
+ return self.close()
+
+
+class NpipeFileIOBase(io.RawIOBase):
+ def __init__(self, npipe_socket):
+ self.sock = npipe_socket
+
+ def close(self):
+ super(NpipeFileIOBase, self).close()
+ self.sock = None
+
+ def fileno(self):
+ return self.sock.fileno()
+
+ def isatty(self):
+ return False
+
+ def readable(self):
+ return True
+
+ def readinto(self, buf):
+ return self.sock.recv_into(buf)
+
+ def seekable(self):
+ return False
+
+ def writable(self):
+ return False
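+
+
+# Illustrative sketch (the pipe path below is the conventional Docker Engine
+# named pipe on Windows and is assumed; this snippet is not part of the
+# upstream module). A raw ping over the pipe using the socket-like API above:
+#
+#   s = NpipeSocket()
+#   s.settimeout(5)
+#   s.connect('\\\\.\\pipe\\docker_engine')
+#   s.sendall(b'GET /_ping HTTP/1.1\r\nHost: localhost\r\n\r\n')
+#   print(s.recv(4096))
+#   s.close()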
diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py
new file mode 100644
index 0000000..8fafec3
--- /dev/null
+++ b/docker/transport/ssladapter.py
@@ -0,0 +1,71 @@
+""" Resolves OpenSSL issues in some servers:
+ https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
+ https://github.com/kennethreitz/requests/pull/799
+"""
+import sys
+
+from distutils.version import StrictVersion
+from requests.adapters import HTTPAdapter
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+
+PoolManager = urllib3.poolmanager.PoolManager
+
+# Monkey-patching match_hostname with a version that supports
+# IP-address checking. Not necessary for Python 3.5 and above
+if sys.version_info[:2] < (3, 5):
+ from backports.ssl_match_hostname import match_hostname
+ urllib3.connection.match_hostname = match_hostname
+
+
+class SSLAdapter(HTTPAdapter):
+ '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
+
+ __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
+ 'assert_hostname',
+ 'ssl_version']
+
+ def __init__(self, ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None, **kwargs):
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ super(SSLAdapter, self).__init__(**kwargs)
+
+ def init_poolmanager(self, connections, maxsize, block=False):
+ kwargs = {
+ 'num_pools': connections,
+ 'maxsize': maxsize,
+ 'block': block,
+ 'assert_hostname': self.assert_hostname,
+ 'assert_fingerprint': self.assert_fingerprint,
+ }
+ if self.ssl_version and self.can_override_ssl_version():
+ kwargs['ssl_version'] = self.ssl_version
+
+ self.poolmanager = PoolManager(**kwargs)
+
+ def get_connection(self, *args, **kwargs):
+ """
+ Ensure assert_hostname is set correctly on our pool
+
+ We already take care of a normal poolmanager via init_poolmanager
+
+ But we still need to take care of when there is a proxy poolmanager
+ """
+ conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
+ if conn.assert_hostname != self.assert_hostname:
+ conn.assert_hostname = self.assert_hostname
+ return conn
+
+ def can_override_ssl_version(self):
+ urllib_ver = urllib3.__version__.split('-')[0]
+ if urllib_ver is None:
+ return False
+ if urllib_ver == 'dev':
+ return True
+ return StrictVersion(urllib_ver) > StrictVersion('1.5')
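+
+
+# Illustrative sketch (assumed usage; ssl.PROTOCOL_TLSv1_2 is only an example
+# value): pinning the SSL version for all HTTPS requests made through a
+# session.
+#
+#   import ssl
+#   import requests
+#   session = requests.Session()
+#   session.mount('https://', SSLAdapter(ssl_version=ssl.PROTOCOL_TLSv1_2))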
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
new file mode 100644
index 0000000..c59821a
--- /dev/null
+++ b/docker/transport/unixconn.py
@@ -0,0 +1,112 @@
+import six
+import requests.adapters
+import socket
+from six.moves import http_client as httplib
+
+from .. import constants
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class UnixHTTPResponse(httplib.HTTPResponse, object):
+ def __init__(self, sock, *args, **kwargs):
+ disable_buffering = kwargs.pop('disable_buffering', False)
+ if six.PY2:
+ # FIXME: We may need to disable buffering on Py3 as well,
+ # but there's no clear way to do it at the moment. See:
+ # https://github.com/docker/docker-py/issues/1799
+ kwargs['buffering'] = not disable_buffering
+ super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
+
+
+class UnixHTTPConnection(httplib.HTTPConnection, object):
+
+ def __init__(self, base_url, unix_socket, timeout=60):
+ super(UnixHTTPConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.base_url = base_url
+ self.unix_socket = unix_socket
+ self.timeout = timeout
+ self.disable_buffering = False
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(self.timeout)
+ sock.connect(self.unix_socket)
+ self.sock = sock
+
+ def putheader(self, header, *values):
+ super(UnixHTTPConnection, self).putheader(header, *values)
+ if header == 'Connection' and 'Upgrade' in values:
+ self.disable_buffering = True
+
+ def response_class(self, sock, *args, **kwargs):
+ if self.disable_buffering:
+ kwargs['disable_buffering'] = True
+
+ return UnixHTTPResponse(sock, *args, **kwargs)
+
+
+class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
+ super(UnixHTTPConnectionPool, self).__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.base_url = base_url
+ self.socket_path = socket_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return UnixHTTPConnection(
+ self.base_url, self.socket_path, self.timeout
+ )
+
+
+class UnixAdapter(requests.adapters.HTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
+ 'socket_path',
+ 'timeout']
+
+ def __init__(self, socket_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS):
+ socket_path = socket_url.replace('http+unix://', '')
+ if not socket_path.startswith('/'):
+ socket_path = '/' + socket_path
+ self.socket_path = socket_path
+ self.timeout = timeout
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super(UnixAdapter, self).__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = UnixHTTPConnectionPool(
+ url, self.socket_path, self.timeout
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, like is the case when using a UNIX socket.
+ # Since proxies are an irrelevant notion in the case of UNIX sockets
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-py/issues/811
+ return request.path_url
+
+ def close(self):
+ self.pools.clear()
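+
+
+# Illustrative sketch (the socket path below is the conventional Docker
+# daemon socket and is assumed here; snippet not part of upstream):
+#
+#   import requests
+#   session = requests.Session()
+#   session.mount(
+#       'http+unix://', UnixAdapter('http+unix:///var/run/docker.sock')
+#   )
+#   resp = session.get('http+unix://localhost/_ping')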
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
new file mode 100644
index 0000000..0b0d847
--- /dev/null
+++ b/docker/types/__init__.py
@@ -0,0 +1,11 @@
+# flake8: noqa
+from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .daemon import CancellableStream
+from .healthcheck import Healthcheck
+from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
+from .services import (
+ ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
+ Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
+ ServiceMode, TaskTemplate, UpdateConfig
+)
+from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/base.py b/docker/types/base.py
new file mode 100644
index 0000000..6891062
--- /dev/null
+++ b/docker/types/base.py
@@ -0,0 +1,7 @@
+import six
+
+
+class DictType(dict):
+ def __init__(self, init):
+ for k, v in six.iteritems(init):
+ self[k] = v
diff --git a/docker/types/containers.py b/docker/types/containers.py
new file mode 100644
index 0000000..2521420
--- /dev/null
+++ b/docker/types/containers.py
@@ -0,0 +1,598 @@
+import six
+
+from .. import errors
+from ..utils.utils import (
+ convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
+ format_environment, format_extra_hosts, normalize_links, parse_bytes,
+ parse_devices, split_command, version_gte, version_lt,
+)
+from .base import DictType
+from .healthcheck import Healthcheck
+
+
+class LogConfigTypesEnum(object):
+ _values = (
+ 'json-file',
+ 'syslog',
+ 'journald',
+ 'gelf',
+ 'fluentd',
+ 'none'
+ )
+ JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
+
+
+class LogConfig(DictType):
+ types = LogConfigTypesEnum
+
+ def __init__(self, **kwargs):
+ log_driver_type = kwargs.get('type', kwargs.get('Type'))
+ config = kwargs.get('config', kwargs.get('Config')) or {}
+
+ if config and not isinstance(config, dict):
+ raise ValueError("LogConfig.config must be a dictionary")
+
+ super(LogConfig, self).__init__({
+ 'Type': log_driver_type,
+ 'Config': config
+ })
+
+ @property
+ def type(self):
+ return self['Type']
+
+ @type.setter
+ def type(self, value):
+ self['Type'] = value
+
+ @property
+ def config(self):
+ return self['Config']
+
+ def set_config_value(self, key, value):
+ self.config[key] = value
+
+ def unset_config(self, key):
+ if key in self.config:
+ del self.config[key]
+
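+# Illustrative sketch (the driver options shown are standard json-file
+# options; the snippet itself is not part of upstream):
+#
+#   lc = LogConfig(type=LogConfig.types.JSON, config={'max-size': '1m'})
+#   lc.set_config_value('max-file', '3')
+#   lc.unset_config('max-file')
+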
+
+class Ulimit(DictType):
+ def __init__(self, **kwargs):
+ name = kwargs.get('name', kwargs.get('Name'))
+ soft = kwargs.get('soft', kwargs.get('Soft'))
+ hard = kwargs.get('hard', kwargs.get('Hard'))
+ if not isinstance(name, six.string_types):
+ raise ValueError("Ulimit.name must be a string")
+ if soft and not isinstance(soft, int):
+ raise ValueError("Ulimit.soft must be an integer")
+ if hard and not isinstance(hard, int):
+ raise ValueError("Ulimit.hard must be an integer")
+ super(Ulimit, self).__init__({
+ 'Name': name,
+ 'Soft': soft,
+ 'Hard': hard
+ })
+
+ @property
+ def name(self):
+ return self['Name']
+
+ @name.setter
+ def name(self, value):
+ self['Name'] = value
+
+ @property
+ def soft(self):
+ return self.get('Soft')
+
+ @soft.setter
+ def soft(self, value):
+ self['Soft'] = value
+
+ @property
+ def hard(self):
+ return self.get('Hard')
+
+ @hard.setter
+ def hard(self, value):
+ self['Hard'] = value
+
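+# Illustrative sketch: a classic nofile limit (values are examples only).
+#
+#   nofile = Ulimit(name='nofile', soft=1024, hard=2048)
+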
+
+class HostConfig(dict):
+ def __init__(self, version, binds=None, port_bindings=None,
+ lxc_conf=None, publish_all_ports=False, links=None,
+ privileged=False, dns=None, dns_search=None,
+ volumes_from=None, network_mode=None, restart_policy=None,
+ cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_reservation=None,
+ kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
+ group_add=None, cpu_quota=None, cpu_period=None,
+ blkio_weight=None, blkio_weight_device=None,
+ device_read_bps=None, device_write_bps=None,
+ device_read_iops=None, device_write_iops=None,
+ oom_kill_disable=False, shm_size=None, sysctls=None,
+ tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
+ cpuset_cpus=None, userns_mode=None, pids_limit=None,
+ isolation=None, auto_remove=False, storage_opt=None,
+ init=None, init_path=None, volume_driver=None,
+ cpu_count=None, cpu_percent=None, nano_cpus=None,
+ cpuset_mems=None, runtime=None, mounts=None,
+ cpu_rt_period=None, cpu_rt_runtime=None,
+ device_cgroup_rules=None):
+
+ if mem_limit is not None:
+ self['Memory'] = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ self['MemorySwap'] = parse_bytes(memswap_limit)
+
+ if mem_reservation:
+ self['MemoryReservation'] = parse_bytes(mem_reservation)
+
+ if kernel_memory:
+ self['KernelMemory'] = parse_bytes(kernel_memory)
+
+ if mem_swappiness is not None:
+ if not isinstance(mem_swappiness, int):
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
+ )
+
+ self['MemorySwappiness'] = mem_swappiness
+
+ if shm_size is not None:
+ if isinstance(shm_size, six.string_types):
+ shm_size = parse_bytes(shm_size)
+
+ self['ShmSize'] = shm_size
+
+ if pid_mode:
+ if version_lt(version, '1.24') and pid_mode != 'host':
+ raise host_config_value_error('pid_mode', pid_mode)
+ self['PidMode'] = pid_mode
+
+ if ipc_mode:
+ self['IpcMode'] = ipc_mode
+
+ if privileged:
+ self['Privileged'] = privileged
+
+ if oom_kill_disable:
+ self['OomKillDisable'] = oom_kill_disable
+
+ if oom_score_adj:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('oom_score_adj', '1.22')
+ if not isinstance(oom_score_adj, int):
+ raise host_config_type_error(
+ 'oom_score_adj', oom_score_adj, 'int'
+ )
+ self['OomScoreAdj'] = oom_score_adj
+
+ if publish_all_ports:
+ self['PublishAllPorts'] = publish_all_ports
+
+ if read_only is not None:
+ self['ReadonlyRootfs'] = read_only
+
+ if dns_search:
+ self['DnsSearch'] = dns_search
+
+ if network_mode:
+ self['NetworkMode'] = network_mode
+ elif network_mode is None:
+ self['NetworkMode'] = 'default'
+
+ if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
+ self['RestartPolicy'] = restart_policy
+
+ if cap_add:
+ self['CapAdd'] = cap_add
+
+ if cap_drop:
+ self['CapDrop'] = cap_drop
+
+ if devices:
+ self['Devices'] = parse_devices(devices)
+
+ if group_add:
+ self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+
+ if dns is not None:
+ self['Dns'] = dns
+
+ if dns_opt is not None:
+ self['DnsOptions'] = dns_opt
+
+ if security_opt is not None:
+ if not isinstance(security_opt, list):
+ raise host_config_type_error(
+ 'security_opt', security_opt, 'list'
+ )
+
+ self['SecurityOpt'] = security_opt
+
+ if sysctls:
+ if not isinstance(sysctls, dict):
+ raise host_config_type_error('sysctls', sysctls, 'dict')
+ self['Sysctls'] = {}
+ for k, v in six.iteritems(sysctls):
+ self['Sysctls'][k] = six.text_type(v)
+
+ if volumes_from is not None:
+ if isinstance(volumes_from, six.string_types):
+ volumes_from = volumes_from.split(',')
+
+ self['VolumesFrom'] = volumes_from
+
+ if binds is not None:
+ self['Binds'] = convert_volume_binds(binds)
+
+ if port_bindings is not None:
+ self['PortBindings'] = convert_port_bindings(port_bindings)
+
+ if extra_hosts is not None:
+ if isinstance(extra_hosts, dict):
+ extra_hosts = format_extra_hosts(extra_hosts)
+
+ self['ExtraHosts'] = extra_hosts
+
+ if links is not None:
+ self['Links'] = normalize_links(links)
+
+ if isinstance(lxc_conf, dict):
+ formatted = []
+ for k, v in six.iteritems(lxc_conf):
+ formatted.append({'Key': k, 'Value': str(v)})
+ lxc_conf = formatted
+
+ if lxc_conf is not None:
+ self['LxcConf'] = lxc_conf
+
+ if cgroup_parent is not None:
+ self['CgroupParent'] = cgroup_parent
+
+ if ulimits is not None:
+ if not isinstance(ulimits, list):
+ raise host_config_type_error('ulimits', ulimits, 'list')
+ self['Ulimits'] = []
+ for l in ulimits:
+ if not isinstance(l, Ulimit):
+ l = Ulimit(**l)
+ self['Ulimits'].append(l)
+
+ if log_config is not None:
+ if not isinstance(log_config, LogConfig):
+ if not isinstance(log_config, dict):
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
+ )
+ log_config = LogConfig(**log_config)
+
+ self['LogConfig'] = log_config
+
+ if cpu_quota:
+ if not isinstance(cpu_quota, int):
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
+ self['CpuQuota'] = cpu_quota
+
+ if cpu_period:
+ if not isinstance(cpu_period, int):
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
+ self['CpuPeriod'] = cpu_period
+
+ if cpu_shares:
+ if not isinstance(cpu_shares, int):
+ raise host_config_type_error('cpu_shares', cpu_shares, 'int')
+
+ self['CpuShares'] = cpu_shares
+
+ if cpuset_cpus:
+ self['CpusetCpus'] = cpuset_cpus
+
+ if cpuset_mems:
+ if not isinstance(cpuset_mems, str):
+ raise host_config_type_error(
+ 'cpuset_mems', cpuset_mems, 'str'
+ )
+ self['CpusetMems'] = cpuset_mems
+
+ if cpu_rt_period:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_period', '1.25')
+
+ if not isinstance(cpu_rt_period, int):
+ raise host_config_type_error(
+ 'cpu_rt_period', cpu_rt_period, 'int'
+ )
+ self['CPURealtimePeriod'] = cpu_rt_period
+
+ if cpu_rt_runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_rt_runtime', '1.25')
+
+ if not isinstance(cpu_rt_runtime, int):
+ raise host_config_type_error(
+ 'cpu_rt_runtime', cpu_rt_runtime, 'int'
+ )
+ self['CPURealtimeRuntime'] = cpu_rt_runtime
+
+ if blkio_weight:
+ if not isinstance(blkio_weight, int):
+ raise host_config_type_error(
+ 'blkio_weight', blkio_weight, 'int'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight', '1.22')
+ self["BlkioWeight"] = blkio_weight
+
+ if blkio_weight_device:
+ if not isinstance(blkio_weight_device, list):
+ raise host_config_type_error(
+ 'blkio_weight_device', blkio_weight_device, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight_device', '1.22')
+ self["BlkioWeightDevice"] = blkio_weight_device
+
+ if device_read_bps:
+ if not isinstance(device_read_bps, list):
+ raise host_config_type_error(
+ 'device_read_bps', device_read_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_bps', '1.22')
+ self["BlkioDeviceReadBps"] = device_read_bps
+
+ if device_write_bps:
+ if not isinstance(device_write_bps, list):
+ raise host_config_type_error(
+ 'device_write_bps', device_write_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_bps', '1.22')
+ self["BlkioDeviceWriteBps"] = device_write_bps
+
+ if device_read_iops:
+ if not isinstance(device_read_iops, list):
+ raise host_config_type_error(
+ 'device_read_iops', device_read_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_iops', '1.22')
+ self["BlkioDeviceReadIOps"] = device_read_iops
+
+ if device_write_iops:
+ if not isinstance(device_write_iops, list):
+ raise host_config_type_error(
+ 'device_write_iops', device_write_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_iops', '1.22')
+ self["BlkioDeviceWriteIOps"] = device_write_iops
+
+ if tmpfs:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('tmpfs', '1.22')
+ self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
+
+ if userns_mode:
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('userns_mode', '1.23')
+
+ if userns_mode != "host":
+ raise host_config_value_error("userns_mode", userns_mode)
+ self['UsernsMode'] = userns_mode
+
+ if pids_limit:
+ if not isinstance(pids_limit, int):
+ raise host_config_type_error('pids_limit', pids_limit, 'int')
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('pids_limit', '1.23')
+ self["PidsLimit"] = pids_limit
+
+ if isolation:
+ if not isinstance(isolation, six.string_types):
+ raise host_config_type_error('isolation', isolation, 'string')
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('isolation', '1.24')
+ self['Isolation'] = isolation
+
+ if auto_remove:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('auto_remove', '1.25')
+ self['AutoRemove'] = auto_remove
+
+ if storage_opt is not None:
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('storage_opt', '1.24')
+ self['StorageOpt'] = storage_opt
+
+ if init is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init', '1.25')
+ self['Init'] = init
+
+ if init_path is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init_path', '1.25')
+
+ if version_gte(version, '1.29'):
+ # https://github.com/moby/moby/pull/32470
+ raise host_config_version_error('init_path', '1.29', False)
+ self['InitPath'] = init_path
+
+ if volume_driver is not None:
+ self['VolumeDriver'] = volume_driver
+
+ if cpu_count:
+ if not isinstance(cpu_count, int):
+ raise host_config_type_error('cpu_count', cpu_count, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_count', '1.25')
+
+ self['CpuCount'] = cpu_count
+
+ if cpu_percent:
+ if not isinstance(cpu_percent, int):
+ raise host_config_type_error('cpu_percent', cpu_percent, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_percent', '1.25')
+
+ self['CpuPercent'] = cpu_percent
+
+ if nano_cpus:
+ if not isinstance(nano_cpus, six.integer_types):
+ raise host_config_type_error('nano_cpus', nano_cpus, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('nano_cpus', '1.25')
+
+ self['NanoCpus'] = nano_cpus
+
+ if runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('runtime', '1.25')
+ self['Runtime'] = runtime
+
+ if mounts is not None:
+ if version_lt(version, '1.30'):
+ raise host_config_version_error('mounts', '1.30')
+ self['Mounts'] = mounts
+
+ if device_cgroup_rules is not None:
+ if version_lt(version, '1.28'):
+ raise host_config_version_error('device_cgroup_rules', '1.28')
+ if not isinstance(device_cgroup_rules, list):
+ raise host_config_type_error(
+ 'device_cgroup_rules', device_cgroup_rules, 'list'
+ )
+ self['DeviceCgroupRules'] = device_cgroup_rules
+
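+# Illustrative sketch (API version and all values are assumptions):
+#
+#   hc = HostConfig(
+#       version='1.30',
+#       binds=['/tmp/data:/data:rw'],
+#       port_bindings={80: 8080},
+#       mem_limit='512m',
+#       restart_policy={'Name': 'on-failure', 'MaximumRetryCount': 3},
+#   )
+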
+
+def host_config_type_error(param, param_value, expected):
+ error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
+ return TypeError(error_msg.format(param, expected, type(param_value)))
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ error_msg = '{0} param is not supported in API versions {1} {2}'
+ return errors.InvalidVersion(error_msg.format(param, operator, version))
+
+
+def host_config_value_error(param, param_value):
+ error_msg = 'Invalid value for {0} param: {1}'
+ return ValueError(error_msg.format(param, param_value))
+
+
+class ContainerConfig(dict):
+ def __init__(
+ self, version, image, command, hostname=None, user=None, detach=False,
+ stdin_open=False, tty=False, ports=None, environment=None,
+ volumes=None, network_disabled=False, entrypoint=None,
+ working_dir=None, domainname=None, host_config=None, mac_address=None,
+ labels=None, stop_signal=None, networking_config=None,
+ healthcheck=None, stop_timeout=None, runtime=None
+ ):
+
+ if stop_timeout is not None and version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'stop_timeout was only introduced in API version 1.25'
+ )
+
+ if healthcheck is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'Health options were only introduced in API version 1.24'
+ )
+
+ if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
+ raise errors.InvalidVersion(
+ 'healthcheck start period was introduced in API '
+ 'version 1.29'
+ )
+
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+
+ if isinstance(entrypoint, six.string_types):
+ entrypoint = split_command(entrypoint)
+
+ if isinstance(environment, dict):
+ environment = format_environment(environment)
+
+ if isinstance(labels, list):
+ labels = dict((lbl, six.text_type('')) for lbl in labels)
+
+ if isinstance(ports, list):
+ exposed_ports = {}
+ for port_definition in ports:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ ports = exposed_ports
+
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
+ if isinstance(volumes, list):
+ volumes_dict = {}
+ for vol in volumes:
+ volumes_dict[vol] = {}
+ volumes = volumes_dict
+
+ if healthcheck and isinstance(healthcheck, dict):
+ healthcheck = Healthcheck(**healthcheck)
+
+ attach_stdin = False
+ attach_stdout = False
+ attach_stderr = False
+ stdin_once = False
+
+ if not detach:
+ attach_stdout = True
+ attach_stderr = True
+
+ if stdin_open:
+ attach_stdin = True
+ stdin_once = True
+
+ self.update({
+ 'Hostname': hostname,
+ 'Domainname': domainname,
+ 'ExposedPorts': ports,
+ 'User': six.text_type(user) if user else None,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'AttachStdin': attach_stdin,
+ 'AttachStdout': attach_stdout,
+ 'AttachStderr': attach_stderr,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'WorkingDir': working_dir,
+ 'HostConfig': host_config,
+ 'NetworkingConfig': networking_config,
+ 'MacAddress': mac_address,
+ 'Labels': labels,
+ 'StopSignal': stop_signal,
+ 'Healthcheck': healthcheck,
+ 'StopTimeout': stop_timeout,
+ 'Runtime': runtime
+ })
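+
+
+# Illustrative sketch (API version and values are assumptions):
+#
+#   cc = ContainerConfig(
+#       version='1.30', image='busybox', command='echo hello',
+#       environment={'FOO': 'bar'}, tty=True,
+#   )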
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
new file mode 100644
index 0000000..ee8624e
--- /dev/null
+++ b/docker/types/daemon.py
@@ -0,0 +1,64 @@
+import socket
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+
+class CancellableStream(object):
+ """
+ Stream wrapper for real-time events, logs, etc. from the server.
+
+ Example:
+ >>> events = client.events()
+ >>> for event in events:
+        ...     print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ def __init__(self, stream, response):
+ self._stream = stream
+ self._response = response
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self._stream)
+ except urllib3.exceptions.ProtocolError:
+ raise StopIteration
+ except socket.error:
+ raise StopIteration
+
+ next = __next__
+
+ def close(self):
+ """
+ Closes the event streaming.
+ """
+
+ if not self._response.raw.closed:
+ # find the underlying socket object
+ # based on api.client._get_raw_response_socket
+
+ sock_fp = self._response.raw._fp.fp
+
+ if hasattr(sock_fp, 'raw'):
+ sock_raw = sock_fp.raw
+
+ if hasattr(sock_raw, 'sock'):
+ sock = sock_raw.sock
+
+ elif hasattr(sock_raw, '_sock'):
+ sock = sock_raw._sock
+
+ else:
+ sock = sock_fp._sock
+                # urllib3.contrib.pyopenssl is only present when it has been
+                # explicitly imported, so guard before touching it.
+                if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
+                        sock, urllib3.contrib.pyopenssl.WrappedSocket):
+                    sock = sock.socket
+
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
new file mode 100644
index 0000000..61857c2
--- /dev/null
+++ b/docker/types/healthcheck.py
@@ -0,0 +1,88 @@
+from .base import DictType
+
+import six
+
+
+class Healthcheck(DictType):
+ """
+ Defines a healthcheck configuration for a container or service.
+
+ Args:
+ test (:py:class:`list` or str): Test to perform to determine
+ container health. Possible values:
+
+ - Empty list: Inherit healthcheck from parent image
+ - ``["NONE"]``: Disable healthcheck
+ - ``["CMD", args...]``: exec arguments directly.
+            - ``["CMD-SHELL", command]``: Run command in the system's
+ default shell.
+
+ If a string is provided, it will be used as a ``CMD-SHELL``
+ command.
+ interval (int): The time to wait between checks in nanoseconds. It
+ should be 0 or at least 1000000 (1 ms).
+ timeout (int): The time to wait before considering the check to
+ have hung. It should be 0 or at least 1000000 (1 ms).
+ retries (integer): The number of consecutive failures needed to
+ consider a container as unhealthy.
+ start_period (integer): Start period for the container to
+ initialize before starting health-retries countdown in
+ nanoseconds. It should be 0 or at least 1000000 (1 ms).
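+
+    Example (illustrative sketch; values are assumptions, durations are
+    given in nanoseconds as described above):
+
+        >>> healthcheck = Healthcheck(
+        ...     test='curl -f http://localhost/ || exit 1',
+        ...     interval=30 * 1000000000,
+        ...     timeout=10 * 1000000000,
+        ...     retries=3
+        ... )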
+ """
+ def __init__(self, **kwargs):
+ test = kwargs.get('test', kwargs.get('Test'))
+ if isinstance(test, six.string_types):
+ test = ["CMD-SHELL", test]
+
+ interval = kwargs.get('interval', kwargs.get('Interval'))
+ timeout = kwargs.get('timeout', kwargs.get('Timeout'))
+ retries = kwargs.get('retries', kwargs.get('Retries'))
+ start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
+
+ super(Healthcheck, self).__init__({
+ 'Test': test,
+ 'Interval': interval,
+ 'Timeout': timeout,
+ 'Retries': retries,
+ 'StartPeriod': start_period
+ })
+
+ @property
+ def test(self):
+ return self['Test']
+
+ @test.setter
+ def test(self, value):
+ self['Test'] = value
+
+ @property
+ def interval(self):
+ return self['Interval']
+
+ @interval.setter
+ def interval(self, value):
+ self['Interval'] = value
+
+ @property
+ def timeout(self):
+ return self['Timeout']
+
+ @timeout.setter
+ def timeout(self, value):
+ self['Timeout'] = value
+
+ @property
+ def retries(self):
+ return self['Retries']
+
+ @retries.setter
+ def retries(self, value):
+ self['Retries'] = value
+
+ @property
+ def start_period(self):
+ return self['StartPeriod']
+
+ @start_period.setter
+ def start_period(self, value):
+ self['StartPeriod'] = value
diff --git a/docker/types/networks.py b/docker/types/networks.py
new file mode 100644
index 0000000..1c7b2c9
--- /dev/null
+++ b/docker/types/networks.py
@@ -0,0 +1,111 @@
+from .. import errors
+from ..utils import normalize_links, version_lt
+
+
+class EndpointConfig(dict):
+ def __init__(self, version, aliases=None, links=None, ipv4_address=None,
+ ipv6_address=None, link_local_ips=None):
+ if version_lt(version, '1.22'):
+ raise errors.InvalidVersion(
+ 'Endpoint config is not supported for API version < 1.22'
+ )
+
+ if aliases:
+ self["Aliases"] = aliases
+
+ if links:
+ self["Links"] = normalize_links(links)
+
+ ipam_config = {}
+ if ipv4_address:
+ ipam_config['IPv4Address'] = ipv4_address
+
+ if ipv6_address:
+ ipam_config['IPv6Address'] = ipv6_address
+
+ if link_local_ips is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'link_local_ips is not supported for API version < 1.24'
+ )
+ ipam_config['LinkLocalIPs'] = link_local_ips
+
+ if ipam_config:
+ self['IPAMConfig'] = ipam_config
+
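+# Illustrative sketch (API version and addresses are assumptions):
+#
+#   ep = EndpointConfig(version='1.24', aliases=['db'],
+#                       ipv4_address='172.18.0.10')
+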
+
+class NetworkingConfig(dict):
+ def __init__(self, endpoints_config=None):
+ if endpoints_config:
+ self["EndpointsConfig"] = endpoints_config
+
+
+class IPAMConfig(dict):
+ """
+ Create an IPAM (IP Address Management) config dictionary to be used with
+ :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
+
+ Args:
+
+ driver (str): The IPAM driver to use. Defaults to ``default``.
+ pool_configs (:py:class:`list`): A list of pool configurations
+ (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
+ options (dict): Driver options as a key-value dictionary.
+ Defaults to `None`.
+
+ Example:
+
+ >>> ipam_config = docker.types.IPAMConfig(driver='default')
+ >>> network = client.create_network('network1', ipam=ipam_config)
+
+ """
+ def __init__(self, driver='default', pool_configs=None, options=None):
+ self.update({
+ 'Driver': driver,
+ 'Config': pool_configs or []
+ })
+
+ if options:
+ if not isinstance(options, dict):
+ raise TypeError('IPAMConfig options must be a dictionary')
+ self['Options'] = options
+
+
+class IPAMPool(dict):
+ """
+ Create an IPAM pool config dictionary to be added to the
+ ``pool_configs`` parameter of
+ :py:class:`~docker.types.IPAMConfig`.
+
+ Args:
+
+ subnet (str): Custom subnet for this IPAM pool using the CIDR
+ notation. Defaults to ``None``.
+ iprange (str): Custom IP range for endpoints in this IPAM pool using
+ the CIDR notation. Defaults to ``None``.
+ gateway (str): Custom IP address for the pool's gateway.
+ aux_addresses (dict): A dictionary of ``key -> ip_address``
+ relationships specifying auxiliary addresses that need to be
+ allocated by the IPAM driver.
+
+ Example:
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='124.42.0.0/16',
+ iprange='124.42.0.0/24',
+ gateway='124.42.0.254',
+ aux_addresses={
+ 'reserved1': '124.42.1.1'
+ }
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool])
+ """
+ def __init__(self, subnet=None, iprange=None, gateway=None,
+ aux_addresses=None):
+ self.update({
+ 'Subnet': subnet,
+ 'IPRange': iprange,
+ 'Gateway': gateway,
+ 'AuxiliaryAddresses': aux_addresses
+ })
diff --git a/docker/types/services.py b/docker/types/services.py
new file mode 100644
index 0000000..31f4750
--- /dev/null
+++ b/docker/types/services.py
@@ -0,0 +1,715 @@
+import six
+
+from .. import errors
+from ..constants import IS_WINDOWS_PLATFORM
+from ..utils import (
+ check_resource, format_environment, format_extra_hosts, parse_bytes,
+ split_command, convert_service_networks,
+)
+
+
+class TaskTemplate(dict):
+ """
+ Describe the task specification to be used when creating or updating a
+ service.
+
+ Args:
+
+ container_spec (ContainerSpec): Container settings for containers
+ started as part of this task.
+ log_driver (DriverConfig): Log configuration for containers created as
+ part of the service.
+ resources (Resources): Resource requirements which apply to each
+ individual container created as part of the service.
+ restart_policy (RestartPolicy): Specification for the restart policy
+ which applies to containers created as part of this service.
+ placement (Placement): Placement instructions for the scheduler.
+ If a list is passed instead, it is assumed to be a list of
+ constraints as part of a :py:class:`Placement` object.
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the containers to.
+ force_update (int): A counter that triggers an update even if no
+ relevant parameters have been changed.
+ """
+ def __init__(self, container_spec, resources=None, restart_policy=None,
+ placement=None, log_driver=None, networks=None,
+ force_update=None):
+ self['ContainerSpec'] = container_spec
+ if resources:
+ self['Resources'] = resources
+ if restart_policy:
+ self['RestartPolicy'] = restart_policy
+ if placement:
+ if isinstance(placement, list):
+ placement = Placement(constraints=placement)
+ self['Placement'] = placement
+ if log_driver:
+ self['LogDriver'] = log_driver
+ if networks:
+ self['Networks'] = convert_service_networks(networks)
+
+ if force_update is not None:
+ if not isinstance(force_update, int):
+ raise TypeError('force_update must be an integer')
+ self['ForceUpdate'] = force_update
+
+ @property
+ def container_spec(self):
+ return self.get('ContainerSpec')
+
+ @property
+ def resources(self):
+ return self.get('Resources')
+
+ @property
+ def restart_policy(self):
+ return self.get('RestartPolicy')
+
+ @property
+ def placement(self):
+ return self.get('Placement')
+
+
+class ContainerSpec(dict):
+ """
+ Describes the behavior of containers that are part of a task, and is used
+ when declaring a :py:class:`~docker.types.TaskTemplate`.
+
+ Args:
+
+ image (string): The image name to use for the container.
+ command (string or list): The command to be run in the image.
+ args (:py:class:`list`): Arguments to the command.
+ hostname (string): The hostname to set on the container.
+ env (dict): Environment variables.
+ workdir (string): The working directory for commands to run in.
+ user (string): The user inside the container.
+ labels (dict): A map of labels to associate with the service.
+ mounts (:py:class:`list`): A list of specifications for mounts to be
+ added to containers created as part of the service. See the
+ :py:class:`~docker.types.Mount` class for details.
+ stop_grace_period (int): Amount of time to wait for the container to
+ terminate before forcefully killing it.
+ secrets (:py:class:`list`): List of :py:class:`SecretReference` to be
+ made available inside the containers.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ groups (:py:class:`list`): A list of additional groups that the
+ container process will run as.
+        open_stdin (boolean): Open ``stdin``.
+ read_only (boolean): Mount the container's root filesystem as read
+ only.
+ stop_signal (string): Set signal to stop the service's containers
+ healthcheck (Healthcheck): Healthcheck
+ configuration for this service.
+ hosts (:py:class:`dict`): A set of host to IP mappings to add to
+ the container's ``hosts`` file.
+ dns_config (DNSConfig): Specification for DNS
+ related configurations in resolver configuration file.
+ configs (:py:class:`list`): List of :py:class:`ConfigReference` that
+ will be exposed to the service.
+ privileges (Privileges): Security options for the service's containers.
+ isolation (string): Isolation technology used by the service's
+ containers. Only used for Windows containers.
+ """
+ def __init__(self, image, command=None, args=None, hostname=None, env=None,
+ workdir=None, user=None, labels=None, mounts=None,
+ stop_grace_period=None, secrets=None, tty=None, groups=None,
+ open_stdin=None, read_only=None, stop_signal=None,
+ healthcheck=None, hosts=None, dns_config=None, configs=None,
+ privileges=None, isolation=None):
+ self['Image'] = image
+
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+ self['Command'] = command
+ self['Args'] = args
+
+ if hostname is not None:
+ self['Hostname'] = hostname
+ if env is not None:
+ if isinstance(env, dict):
+ self['Env'] = format_environment(env)
+ else:
+ self['Env'] = env
+ if workdir is not None:
+ self['Dir'] = workdir
+ if user is not None:
+ self['User'] = user
+ if groups is not None:
+ self['Groups'] = groups
+ if stop_signal is not None:
+ self['StopSignal'] = stop_signal
+ if stop_grace_period is not None:
+ self['StopGracePeriod'] = stop_grace_period
+ if labels is not None:
+ self['Labels'] = labels
+ if hosts is not None:
+ self['Hosts'] = format_extra_hosts(hosts, task=True)
+
+ if mounts is not None:
+ parsed_mounts = []
+ for mount in mounts:
+ if isinstance(mount, six.string_types):
+ parsed_mounts.append(Mount.parse_mount_string(mount))
+ else:
+ # If mount already parsed
+ parsed_mounts.append(mount)
+ self['Mounts'] = parsed_mounts
+
+ if secrets is not None:
+ if not isinstance(secrets, list):
+ raise TypeError('secrets must be a list')
+ self['Secrets'] = secrets
+
+ if configs is not None:
+ if not isinstance(configs, list):
+ raise TypeError('configs must be a list')
+ self['Configs'] = configs
+
+ if dns_config is not None:
+ self['DNSConfig'] = dns_config
+ if privileges is not None:
+ self['Privileges'] = privileges
+ if healthcheck is not None:
+ self['Healthcheck'] = healthcheck
+
+ if tty is not None:
+ self['TTY'] = tty
+ if open_stdin is not None:
+ self['OpenStdin'] = open_stdin
+ if read_only is not None:
+ self['ReadOnly'] = read_only
+
+ if isolation is not None:
+ self['Isolation'] = isolation
+
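+# Illustrative sketch (image name and values are assumptions): a minimal
+# ContainerSpec wrapped in a TaskTemplate, as used when creating a service.
+#
+#   spec = ContainerSpec(image='nginx:alpine', env={'FOO': 'bar'})
+#   template = TaskTemplate(
+#       container_spec=spec,
+#       restart_policy=RestartPolicy(condition='on-failure'),
+#   )
+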
+
+class Mount(dict):
+ """
+ Describes a mounted folder's configuration inside a container. A list of
+ :py:class:`Mount` would be used as part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ target (string): Container path.
+ source (string): Mount source (e.g. a volume name or a host path).
+ type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
+ ``npipe``). Default: ``volume``.
+ read_only (bool): Whether the mount should be read-only.
+ consistency (string): The consistency requirement for the mount. One of
+            ``default``, ``consistent``, ``cached``, ``delegated``.
+ propagation (string): A propagation mode with the value ``[r]private``,
+ ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
+ no_copy (bool): False if the volume should be populated with the data
+ from the target. Default: ``False``. Only valid for the ``volume``
+ type.
+ labels (dict): User-defined name and labels for the volume. Only valid
+ for the ``volume`` type.
+ driver_config (DriverConfig): Volume driver configuration. Only valid
+ for the ``volume`` type.
+ tmpfs_size (int or string): The size for the tmpfs mount in bytes.
+ tmpfs_mode (int): The permission mode for the tmpfs mount.
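+
+    Example (illustrative sketch; names and labels are assumptions):
+
+        >>> mount = Mount(
+        ...     target='/data', source='my-volume', type='volume',
+        ...     labels={'com.example.environment': 'test'}
+        ... )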
+ """
+ def __init__(self, target, source, type='volume', read_only=False,
+ consistency=None, propagation=None, no_copy=False,
+ labels=None, driver_config=None, tmpfs_size=None,
+ tmpfs_mode=None):
+ self['Target'] = target
+ self['Source'] = source
+ if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
+ raise errors.InvalidArgument(
+ 'Unsupported mount type: "{}"'.format(type)
+ )
+ self['Type'] = type
+ self['ReadOnly'] = read_only
+
+ if consistency:
+ self['Consistency'] = consistency
+
+ if type == 'bind':
+ if propagation is not None:
+ self['BindOptions'] = {
+ 'Propagation': propagation
+ }
+ if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the bind '
+ 'type mount.'
+ )
+ elif type == 'volume':
+ volume_opts = {}
+ if no_copy:
+ volume_opts['NoCopy'] = True
+ if labels:
+ volume_opts['Labels'] = labels
+ if driver_config:
+ volume_opts['DriverConfig'] = driver_config
+ if volume_opts:
+ self['VolumeOptions'] = volume_opts
+ if any([propagation, tmpfs_size, tmpfs_mode]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the volume '
+ 'type mount.'
+ )
+ elif type == 'tmpfs':
+ tmpfs_opts = {}
+ if tmpfs_mode:
+ if not isinstance(tmpfs_mode, six.integer_types):
+ raise errors.InvalidArgument(
+ 'tmpfs_mode must be an integer'
+ )
+ tmpfs_opts['Mode'] = tmpfs_mode
+ if tmpfs_size:
+ tmpfs_opts['SizeBytes'] = parse_bytes(tmpfs_size)
+ if tmpfs_opts:
+ self['TmpfsOptions'] = tmpfs_opts
+ if any([propagation, labels, driver_config, no_copy]):
+ raise errors.InvalidArgument(
+ 'Incompatible options have been provided for the tmpfs '
+ 'type mount.'
+ )
+
+ @classmethod
+ def parse_mount_string(cls, string):
+ parts = string.split(':')
+ if len(parts) > 3:
+ raise errors.InvalidArgument(
+ 'Invalid mount format "{0}"'.format(string)
+ )
+ if len(parts) == 1:
+ return cls(target=parts[0], source=None)
+ else:
+ target = parts[1]
+ source = parts[0]
+ mount_type = 'volume'
+ if source.startswith('/') or (
+ IS_WINDOWS_PLATFORM and source[0].isalpha() and
+ source[1] == ':'
+ ):
+ # FIXME: That windows condition will fail earlier since we
+ # split on ':'. We should look into doing a smarter split
+ # if we detect we are on Windows.
+ mount_type = 'bind'
+ read_only = not (len(parts) == 2 or parts[2] == 'rw')
+ return cls(target, source, read_only=read_only, type=mount_type)
+
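+# Illustrative sketches of the short syntax handled by
+# Mount.parse_mount_string (paths and names are assumptions):
+#
+#   Mount.parse_mount_string('my-volume:/data')
+#   # -> volume mount, read-write
+#   Mount.parse_mount_string('/host/logs:/var/log:ro')
+#   # -> bind mount, read-only
+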
+
+class Resources(dict):
+ """
+ Configures resource allocation for containers when made part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ cpu_limit (int): CPU limit in units of 10^9 CPU shares.
+ mem_limit (int): Memory limit in Bytes.
+ cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
+ mem_reservation (int): Memory reservation in Bytes.
+ generic_resources (dict or :py:class:`list`): Node level generic
+ resources, for example a GPU, using the following format:
+ ``{ resource_name: resource_value }``. Alternatively, a list of
+            resource specifications as defined by the Engine API.
+ """
+ def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
+ mem_reservation=None, generic_resources=None):
+ limits = {}
+ reservation = {}
+ if cpu_limit is not None:
+ limits['NanoCPUs'] = cpu_limit
+ if mem_limit is not None:
+ limits['MemoryBytes'] = mem_limit
+ if cpu_reservation is not None:
+ reservation['NanoCPUs'] = cpu_reservation
+ if mem_reservation is not None:
+ reservation['MemoryBytes'] = mem_reservation
+ if generic_resources is not None:
+ reservation['GenericResources'] = (
+ _convert_generic_resources_dict(generic_resources)
+ )
+ if limits:
+ self['Limits'] = limits
+ if reservation:
+ self['Reservations'] = reservation
+
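+# Illustrative sketch: half a CPU (expressed in NanoCPUs) and 128 MiB of
+# memory per task.
+#
+#   res = Resources(cpu_limit=500000000, mem_limit=128 * 1024 * 1024)
+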
+
+def _convert_generic_resources_dict(generic_resources):
+ if isinstance(generic_resources, list):
+ return generic_resources
+ if not isinstance(generic_resources, dict):
+ raise errors.InvalidArgument(
+ 'generic_resources must be a dict or a list'
+ ' (found {})'.format(type(generic_resources))
+ )
+ resources = []
+ for kind, value in six.iteritems(generic_resources):
+ resource_type = None
+ if isinstance(value, int):
+ resource_type = 'DiscreteResourceSpec'
+ elif isinstance(value, str):
+ resource_type = 'NamedResourceSpec'
+ else:
+ raise errors.InvalidArgument(
+ 'Unsupported generic resource reservation '
+ 'type: {}'.format({kind: value})
+ )
+ resources.append({
+ resource_type: {'Kind': kind, 'Value': value}
+ })
+ return resources
+
+
+class UpdateConfig(dict):
+ """
+    Used to specify the way container updates should be performed by a
+    service.
+
+ Args:
+
+ parallelism (int): Maximum number of tasks to be updated in one
+ iteration (0 means unlimited parallelism). Default: 0.
+ delay (int): Amount of time between updates.
+ failure_action (string): Action to take if an updated task fails to
+ run, or stops running during the update. Acceptable values are
+ ``continue`` and ``pause``. Default: ``continue``
+ monitor (int): Amount of time to monitor each updated task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ an update before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+ order (string): Specifies the order of operations when rolling out an
+            updated task. Either ``start-first`` or ``stop-first`` is accepted.
+ """
+ def __init__(self, parallelism=0, delay=None, failure_action='continue',
+ monitor=None, max_failure_ratio=None, order=None):
+ self['Parallelism'] = parallelism
+ if delay is not None:
+ self['Delay'] = delay
+ if failure_action not in ('pause', 'continue'):
+ raise errors.InvalidArgument(
+ 'failure_action must be either `pause` or `continue`.'
+ )
+ self['FailureAction'] = failure_action
+
+ if monitor is not None:
+ if not isinstance(monitor, int):
+ raise TypeError('monitor must be an integer')
+ self['Monitor'] = monitor
+
+ if max_failure_ratio is not None:
+ if not isinstance(max_failure_ratio, (float, int)):
+ raise TypeError('max_failure_ratio must be a float')
+ if max_failure_ratio > 1 or max_failure_ratio < 0:
+ raise errors.InvalidArgument(
+ 'max_failure_ratio must be a number between 0 and 1'
+ )
+ self['MaxFailureRatio'] = max_failure_ratio
+
+ if order is not None:
+ if order not in ('start-first', 'stop-first'):
+ raise errors.InvalidArgument(
+ 'order must be either `start-first` or `stop-first`'
+ )
+ self['Order'] = order
+
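+# Illustrative sketch (values are assumptions): update two tasks at a time,
+# pausing the rollout on failure.
+#
+#   uc = UpdateConfig(parallelism=2, delay=10, failure_action='pause',
+#                     order='start-first')
+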
+
+class RestartConditionTypesEnum(object):
+ _values = (
+ 'none',
+ 'on-failure',
+ 'any',
+ )
+ NONE, ON_FAILURE, ANY = _values
+
+
+class RestartPolicy(dict):
+ """
+ Used when creating a :py:class:`~docker.types.ContainerSpec`,
+ dictates whether a container should restart after stopping or failing.
+
+ Args:
+
+ condition (string): Condition for restart (``none``, ``on-failure``,
+            or ``any``). Default: ``none``.
+ delay (int): Delay between restart attempts. Default: 0
+ max_attempts (int): Maximum attempts to restart a given container
+ before giving up. Default value is 0, which is ignored.
+ window (int): Time window used to evaluate the restart policy. Default
+ value is 0, which is unbounded.
+ """
+
+ condition_types = RestartConditionTypesEnum
+
+ def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
+ max_attempts=0, window=0):
+ if condition not in self.condition_types._values:
+ raise TypeError(
+ 'Invalid RestartPolicy condition {0}'.format(condition)
+ )
+
+ self['Condition'] = condition
+ self['Delay'] = delay
+ self['MaxAttempts'] = max_attempts
+ self['Window'] = window
+
+
+class DriverConfig(dict):
+ """
+ Indicates which driver to use, as well as its configuration. Can be used
+ as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
+ for the `driver_config` in a volume :py:class:`~docker.types.Mount`, or
+ as the driver object in
+ :py:meth:`create_secret`.
+
+ Args:
+
+ name (string): Name of the driver to use.
+ options (dict): Driver-specific options. Default: ``None``.
+ """
+ def __init__(self, name, options=None):
+ self['Name'] = name
+ if options:
+ self['Options'] = options
+
+
+class EndpointSpec(dict):
+ """
+ Describes properties to access and load-balance a service.
+
+ Args:
+
+ mode (string): The mode of resolution to use for internal load
+ balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
+ ``'vip'`` if not provided.
+ ports (dict): Exposed ports that this service is accessible on from the
+ outside, in the form of ``{ published_port: target_port }`` or
+ ``{ published_port: <port_config_tuple> }``. Port config tuple format
+ is ``(target_port [, protocol [, publish_mode]])``.
+ Ports can only be provided if the ``vip`` resolution mode is used.
+ """
+ def __init__(self, mode=None, ports=None):
+ if ports:
+ self['Ports'] = convert_service_ports(ports)
+ if mode:
+ self['Mode'] = mode
+
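+# Illustrative sketch: publish container port 80 as node port 8080/tcp.
+#
+#   es = EndpointSpec(mode='vip', ports={8080: (80, 'tcp')})
+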
+
+def convert_service_ports(ports):
+ if isinstance(ports, list):
+ return ports
+ if not isinstance(ports, dict):
+ raise TypeError(
+ 'Invalid type for ports, expected dict or list'
+ )
+
+ result = []
+ for k, v in six.iteritems(ports):
+ port_spec = {
+ 'Protocol': 'tcp',
+ 'PublishedPort': k
+ }
+
+ if isinstance(v, tuple):
+ port_spec['TargetPort'] = v[0]
+ if len(v) >= 2 and v[1] is not None:
+ port_spec['Protocol'] = v[1]
+ if len(v) == 3:
+ port_spec['PublishMode'] = v[2]
+ if len(v) > 3:
+ raise ValueError(
+ 'Service port configuration can have at most 3 elements: '
+ '(target_port, protocol, mode)'
+ )
+ else:
+ port_spec['TargetPort'] = v
+
+ result.append(port_spec)
+ return result
+
+
+class ServiceMode(dict):
+ """
+ Indicate whether a service should be deployed as a replicated or global
+    service, and associated parameters.
+
+ Args:
+ mode (string): Can be either ``replicated`` or ``global``
+ replicas (int): Number of replicas. For replicated services only.
+ """
+ def __init__(self, mode, replicas=None):
+ if mode not in ('replicated', 'global'):
+ raise errors.InvalidArgument(
+ 'mode must be either "replicated" or "global"'
+ )
+ if mode != 'replicated' and replicas is not None:
+ raise errors.InvalidArgument(
+ 'replicas can only be used for replicated mode'
+ )
+ self[mode] = {}
+ if replicas is not None:
+ self[mode]['Replicas'] = replicas
+
+ @property
+ def mode(self):
+ if 'global' in self:
+ return 'global'
+ return 'replicated'
+
+ @property
+ def replicas(self):
+ if self.mode != 'replicated':
+ return None
+ return self['replicated'].get('Replicas')
+
+
+class SecretReference(dict):
+ """
+ Secret reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a secret is made accessible inside the service's
+ containers.
+
+ Args:
+ secret_id (string): Secret's ID
+ secret_name (string): Secret's name as defined at its creation.
+ filename (string): Name of the file containing the secret. Defaults
+ to the secret's name if not specified.
+ uid (string): UID of the secret file's owner. Default: 0
+ gid (string): GID of the secret file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('secret_id')
+ def __init__(self, secret_id, secret_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['SecretName'] = secret_name
+ self['SecretID'] = secret_id
+ self['File'] = {
+ 'Name': filename or secret_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
+class ConfigReference(dict):
+ """
+ Config reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a config is made accessible inside the service's
+ containers.
+
+ Args:
+ config_id (string): Config's ID
+ config_name (string): Config's name as defined at its creation.
+ filename (string): Name of the file containing the config. Defaults
+ to the config's name if not specified.
+ uid (string): UID of the config file's owner. Default: 0
+ gid (string): GID of the config file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('config_id')
+ def __init__(self, config_id, config_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['ConfigName'] = config_name
+ self['ConfigID'] = config_id
+ self['File'] = {
+ 'Name': filename or config_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
+
+
+class Placement(dict):
+ """
+ Placement constraints to be used as part of a :py:class:`TaskTemplate`
+
+ Args:
+ constraints (:py:class:`list`): A list of constraints
+ preferences (:py:class:`list`): Preferences provide a way to make
+ the scheduler aware of factors such as topology. They are
+ provided in order from highest to lowest precedence.
+ platforms (:py:class:`list`): A list of platforms expressed as
+ ``(arch, os)`` tuples
+ """
+ def __init__(self, constraints=None, preferences=None, platforms=None):
+ if constraints is not None:
+ self['Constraints'] = constraints
+ if preferences is not None:
+ self['Preferences'] = preferences
+ if platforms:
+ self['Platforms'] = []
+ for plat in platforms:
+ self['Platforms'].append({
+ 'Architecture': plat[0], 'OS': plat[1]
+ })
+
+
+class DNSConfig(dict):
+ """
+ Specification for DNS related configurations in resolver configuration
+ file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ nameservers (:py:class:`list`): The IP addresses of the name
+ servers.
+ search (:py:class:`list`): A search list for host-name lookup.
+ options (:py:class:`list`): A list of internal resolver variables
+ to be modified (e.g., ``debug``, ``ndots:3``, etc.).
+ """
+ def __init__(self, nameservers=None, search=None, options=None):
+ self['Nameservers'] = nameservers
+ self['Search'] = search
+ self['Options'] = options
+
+
+class Privileges(dict):
+ """
+ Security options for a service's containers.
+ Part of a :py:class:`ContainerSpec` definition.
+
+ Args:
+ credentialspec_file (str): Load credential spec from this file.
+ The file is read by the daemon, and must be present in the
+ CredentialSpecs subdirectory in the docker data directory,
+ which defaults to ``C:\ProgramData\Docker\`` on Windows.
+ Can not be combined with credentialspec_registry.
+
+ credentialspec_registry (str): Load credential spec from this value
+ in the Windows registry. The specified registry value must be
+ located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion
+ \Virtualization\Containers\CredentialSpecs``.
+ Can not be combined with credentialspec_file.
+
+ selinux_disable (boolean): Disable SELinux
+ selinux_user (string): SELinux user label
+ selinux_role (string): SELinux role label
+ selinux_type (string): SELinux type label
+ selinux_level (string): SELinux level label
+ """
+ def __init__(self, credentialspec_file=None, credentialspec_registry=None,
+ selinux_disable=None, selinux_user=None, selinux_role=None,
+ selinux_type=None, selinux_level=None):
+ credential_spec = {}
+ if credentialspec_registry is not None:
+ credential_spec['Registry'] = credentialspec_registry
+ if credentialspec_file is not None:
+ credential_spec['File'] = credentialspec_file
+
+ if len(credential_spec) > 1:
+ raise errors.InvalidArgument(
+ 'credentialspec_file and credentialspec_registry are mutually'
+ ' exclusive'
+ )
+
+ selinux_context = {
+ 'Disable': selinux_disable,
+ 'User': selinux_user,
+ 'Role': selinux_role,
+ 'Type': selinux_type,
+ 'Level': selinux_level,
+ }
+
+ if len(credential_spec) > 0:
+ self['CredentialSpec'] = credential_spec
+
+        # Every key above defaults to None, so check for an actual value
+        # before emitting the SELinuxContext section.
+        if any(v is not None for v in selinux_context.values()):
+ self['SELinuxContext'] = selinux_context
diff --git a/docker/types/swarm.py b/docker/types/swarm.py
new file mode 100644
index 0000000..9687a82
--- /dev/null
+++ b/docker/types/swarm.py
@@ -0,0 +1,119 @@
+from ..errors import InvalidVersion
+from ..utils import version_lt
+
+
+class SwarmSpec(dict):
+ """
+ Describe a Swarm's configuration and options. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
+ to instantiate.
+ """
+ def __init__(self, version, task_history_retention_limit=None,
+ snapshot_interval=None, keep_old_snapshots=None,
+ log_entries_for_slow_followers=None, heartbeat_tick=None,
+ election_tick=None, dispatcher_heartbeat_period=None,
+ node_cert_expiry=None, external_cas=None, name=None,
+ labels=None, signing_ca_cert=None, signing_ca_key=None,
+ ca_force_rotate=None, autolock_managers=None,
+ log_driver=None):
+ if task_history_retention_limit is not None:
+ self['Orchestration'] = {
+ 'TaskHistoryRetentionLimit': task_history_retention_limit
+ }
+ if any([snapshot_interval,
+ keep_old_snapshots,
+ log_entries_for_slow_followers,
+ heartbeat_tick,
+ election_tick]):
+ self['Raft'] = {
+ 'SnapshotInterval': snapshot_interval,
+ 'KeepOldSnapshots': keep_old_snapshots,
+ 'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
+ 'HeartbeatTick': heartbeat_tick,
+ 'ElectionTick': election_tick
+ }
+
+ if dispatcher_heartbeat_period:
+ self['Dispatcher'] = {
+ 'HeartbeatPeriod': dispatcher_heartbeat_period
+ }
+
+ ca_config = {}
+ if node_cert_expiry is not None:
+ ca_config['NodeCertExpiry'] = node_cert_expiry
+ if external_cas:
+ if version_lt(version, '1.25'):
+ if len(external_cas) > 1:
+ raise InvalidVersion(
+ 'Support for multiple external CAs is not available '
+ 'for API version < 1.25'
+ )
+ ca_config['ExternalCA'] = external_cas[0]
+ else:
+ ca_config['ExternalCAs'] = external_cas
+ if signing_ca_key:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_key is not supported in API version < 1.30'
+ )
+ ca_config['SigningCAKey'] = signing_ca_key
+ if signing_ca_cert:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'signing_ca_cert is not supported in API version < 1.30'
+ )
+ ca_config['SigningCACert'] = signing_ca_cert
+ if ca_force_rotate is not None:
+ if version_lt(version, '1.30'):
+ raise InvalidVersion(
+ 'force_rotate is not supported in API version < 1.30'
+ )
+ ca_config['ForceRotate'] = ca_force_rotate
+ if ca_config:
+ self['CAConfig'] = ca_config
+
+ if autolock_managers is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'autolock_managers is not supported in API version < 1.25'
+ )
+
+ self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers}
+
+ if log_driver is not None:
+ if version_lt(version, '1.25'):
+ raise InvalidVersion(
+ 'log_driver is not supported in API version < 1.25'
+ )
+
+ self['TaskDefaults'] = {'LogDriver': log_driver}
+
+ if name is not None:
+ self['Name'] = name
+ if labels is not None:
+ self['Labels'] = labels
+
+
+class SwarmExternalCA(dict):
+ """
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+
+ Args:
+ url (string): URL where certificate signing requests should be
+ sent.
+ protocol (string): Protocol for communication with the external CA.
+ options (dict): An object with key/value pairs that are interpreted
+ as protocol-specific options for the external CA driver.
+ ca_cert (string): The root CA certificate (in PEM format) this
+ external CA uses to issue TLS certificates (assumed to be to
+ the current swarm root CA certificate if not provided).
+ """
+ def __init__(self, url, protocol=None, options=None, ca_cert=None):
+ self['URL'] = url
+ self['Protocol'] = protocol
+ self['Options'] = options
+ self['CACert'] = ca_cert
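+
+ # Example (illustrative sketch, hypothetical endpoint): forwarding
+ # node certificate signing requests to an external CFSSL server:
+ #
+ #     external_ca = SwarmExternalCA(
+ #         url='https://ca.example.com/api/v1/cfssl/sign',
+ #         protocol='cfssl',
+ #     )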
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
new file mode 100644
index 0000000..81c8186
--- /dev/null
+++ b/docker/utils/__init__.py
@@ -0,0 +1,13 @@
+# flake8: noqa
+from .build import create_archive, exclude_paths, mkbuildcontext, tar
+from .decorators import check_resource, minimum_version, update_headers
+from .utils import (
+ compare_version, convert_port_bindings, convert_volume_binds,
+ parse_repository_tag, parse_host,
+ kwargs_from_env, convert_filters, datetime_to_timestamp,
+ create_host_config, parse_bytes, parse_env_file, version_lt,
+ version_gte, decode_json_header, split_command, create_ipam_config,
+ create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
+ format_environment, format_extra_hosts
+)
+
diff --git a/docker/utils/build.py b/docker/utils/build.py
new file mode 100644
index 0000000..4fa5751
--- /dev/null
+++ b/docker/utils/build.py
@@ -0,0 +1,255 @@
+import io
+import os
+import re
+import tarfile
+import tempfile
+
+import six
+
+from .fnmatch import fnmatch
+from ..constants import IS_WINDOWS_PLATFORM
+
+
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+ dockerfile = dockerfile or (None, None)
+ extra_files = []
+ if dockerfile[1] is not None:
+ dockerignore_contents = '\n'.join(
+ (exclude or ['.dockerignore']) + [dockerfile[0]]
+ )
+ extra_files = [
+ ('.dockerignore', dockerignore_contents),
+ dockerfile,
+ ]
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
+ root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+ an iterator of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ patterns.append('!' + dockerfile)
+ pm = PatternMatcher(patterns)
+ return set(pm.walk(root))
+
+
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False,
+ extra_files=None):
+ extra_files = extra_files or []
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ extra_names = set(e[0] for e in extra_files)
+ for path in files:
+ if path in extra_names:
+ # Extra files override context files with the same name
+ continue
+ full_path = os.path.join(root, path)
+
+ i = t.gettarinfo(full_path, arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
+
+ # Workaround https://bugs.python.org/issue32713
+ if i.mtime < 0 or i.mtime > 8**11 - 1:
+ i.mtime = int(i.mtime)
+
+ if IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+
+ if i.isfile():
+ try:
+ with open(full_path, 'rb') as f:
+ t.addfile(i, f)
+ except IOError:
+ raise IOError(
+ 'Can not read file in context: {}'.format(full_path)
+ )
+ else:
+ # Directories, FIFOs, symlinks... don't need to be read.
+ t.addfile(i, None)
+
+ for name, contents in extra_files:
+ info = tarfile.TarInfo(name)
+ info.size = len(contents)
+ t.addfile(info, io.BytesIO(contents.encode('utf-8')))
+
+ t.close()
+ fileobj.seek(0)
+ return fileobj
+
+
+def mkbuildcontext(dockerfile):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+ if isinstance(dockerfile, io.StringIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ if six.PY3:
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
+ else:
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ elif isinstance(dockerfile, io.BytesIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ else:
+ dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+ t.addfile(dfinfo, dockerfile)
+ t.close()
+ f.seek(0)
+ return f
+
+
+def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+
+def normalize_slashes(p):
+ if IS_WINDOWS_PLATFORM:
+ return '/'.join(split_path(p))
+ return p
+
+
+def walk(root, patterns, default=True):
+ pm = PatternMatcher(patterns)
+ return pm.walk(root)
+
+
+# Heavily based on
+# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
+class PatternMatcher(object):
+ def __init__(self, patterns):
+ self.patterns = list(filter(
+ lambda p: p.dirs, [Pattern(p) for p in patterns]
+ ))
+ self.patterns.append(Pattern('!.dockerignore'))
+
+ def matches(self, filepath):
+ matched = False
+ parent_path = os.path.dirname(filepath)
+ parent_path_dirs = split_path(parent_path)
+
+ for pattern in self.patterns:
+ negative = pattern.exclusion
+ match = pattern.match(filepath)
+ if not match and parent_path != '':
+ if len(pattern.dirs) <= len(parent_path_dirs):
+ match = pattern.match(
+ os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
+ )
+
+ if match:
+ matched = not negative
+
+ return matched
+
+ def walk(self, root):
+ def rec_walk(current_dir):
+ for f in os.listdir(current_dir):
+ fpath = os.path.join(
+ os.path.relpath(current_dir, root), f
+ )
+ if fpath.startswith('.' + os.path.sep):
+ fpath = fpath[2:]
+ match = self.matches(fpath)
+ if not match:
+ yield fpath
+
+ cur = os.path.join(root, fpath)
+ if not os.path.isdir(cur) or os.path.islink(cur):
+ continue
+
+ if match:
+ # If we want to skip this file and it's a directory
+ # then we should first check to see if there's an
+ # excludes pattern (e.g. !dir/file) that starts with this
+ # dir. If so then we can't skip this dir.
+ skip = True
+
+ for pat in self.patterns:
+ if not pat.exclusion:
+ continue
+ if pat.cleaned_pattern.startswith(
+ normalize_slashes(fpath)):
+ skip = False
+ break
+ if skip:
+ continue
+ for sub in rec_walk(cur):
+ yield sub
+
+ return rec_walk(root)
+
+
+class Pattern(object):
+ def __init__(self, pattern_str):
+ self.exclusion = False
+ if pattern_str.startswith('!'):
+ self.exclusion = True
+ pattern_str = pattern_str[1:]
+
+ self.dirs = self.normalize(pattern_str)
+ self.cleaned_pattern = '/'.join(self.dirs)
+
+ @classmethod
+ def normalize(cls, p):
+
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.Clean".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ def match(self, filepath):
+ return fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
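+
+ # Example (illustrative sketch): the matcher follows .dockerignore
+ # semantics, including '!' exclusions that re-include ignored paths:
+ #
+ #     pm = PatternMatcher(['docs', '!docs/README.md'])
+ #     pm.matches('docs/conf.py')    # True  (ignored)
+ #     pm.matches('docs/README.md')  # False (re-included)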
diff --git a/docker/utils/config.py b/docker/utils/config.py
new file mode 100644
index 0000000..82a0e2a
--- /dev/null
+++ b/docker/utils/config.py
@@ -0,0 +1,66 @@
+import json
+import logging
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+
+DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
+LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+log = logging.getLogger(__name__)
+
+
+def find_config_file(config_path=None):
+ paths = list(filter(None, [
+ config_path, # 1
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
+
+ log.debug("Trying paths: {0}".format(repr(paths)))
+
+ for path in paths:
+ if os.path.exists(path):
+ log.debug("Found file at path: {0}".format(path))
+ return path
+
+ log.debug("No config file found")
+
+ return None
+
+
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+ """
+ Get the user's home directory, using the same logic as the Docker Engine
+ client - use %USERPROFILE% on Windows and $HOME (falling back to the
+ password database) on POSIX.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
+def load_general_config(config_path=None):
+ config_file = find_config_file(config_path)
+
+ if not config_file:
+ return {}
+
+ try:
+ with open(config_file) as f:
+ return json.load(f)
+ except (IOError, ValueError) as e:
+ # In the case of a legacy `.dockercfg` file, we won't
+ # be able to load any JSON data.
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
new file mode 100644
index 0000000..c975d4b
--- /dev/null
+++ b/docker/utils/decorators.py
@@ -0,0 +1,47 @@
+import functools
+
+from .. import errors
+from . import utils
+
+
+def check_resource(resource_name):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None and kwargs.get(resource_name):
+ resource_id = kwargs.pop(resource_name)
+ if isinstance(resource_id, dict):
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
+ if not resource_id:
+ raise errors.NullResource(
+ 'Resource ID was not provided'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
+ return decorator
+
+
+def minimum_version(version):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if utils.version_lt(self._version, version):
+ raise errors.InvalidVersion(
+ '{0} is not available for version < {1}'.format(
+ f.__name__, version
+ )
+ )
+ return f(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def update_headers(f):
+ def inner(self, *args, **kwargs):
+ if 'HttpHeaders' in self._general_configs:
+ if not kwargs.get('headers'):
+ kwargs['headers'] = self._general_configs['HttpHeaders']
+ else:
+ kwargs['headers'].update(self._general_configs['HttpHeaders'])
+ return f(self, *args, **kwargs)
+ return inner
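+
+ # Example (illustrative sketch, hypothetical method name): API mixin
+ # methods typically stack these decorators, e.g. in
+ # docker/api/container.py:
+ #
+ #     @utils.check_resource('container')
+ #     @utils.minimum_version('1.20')
+ #     def some_endpoint(self, container, **kwargs):
+ #         ...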
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
new file mode 100644
index 0000000..cc940a2
--- /dev/null
+++ b/docker/utils/fnmatch.py
@@ -0,0 +1,115 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case in account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized
+ if the operating system requires it.
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ name = name.lower()
+ pat = pat.lower()
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+ i, n = 0, len(pat)
+ res = '^'
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ # is some flavor of "**"
+ i = i + 1
+ # Treat **/ as ** so eat the "/"
+ if i < n and pat[i] == '/':
+ i = i + 1
+ if i >= n:
+ # is "**EOF" - to align with .gitignore just accept all
+ res = res + '.*'
+ else:
+ # is "**"
+ # Note that this allows for any # of /'s (even 0) because
+ # the .* will eat everything, even /'s
+ res = res + '(.*/)?'
+ else:
+ # is "*" so map it to anything but "/"
+ res = res + '[^/]*'
+ elif c == '?':
+ # "?" is any char except "/"
+ res = res + '[^/]'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ else:
+ res = res + re.escape(c)
+
+ return res + '$'
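+
+ # Example (illustrative sketch): unlike the stdlib fnmatch, '*' stops
+ # at path separators while '**' crosses them:
+ #
+ #     fnmatch('foo.py', '*.py')            # True
+ #     fnmatch('src/foo.py', '*.py')        # False
+ #     fnmatch('src/a/foo.py', '**/*.py')   # True
+ #     translate('**/*.py')                 # "^(.*/)?[^/]*\.py$"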
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
new file mode 100644
index 0000000..addffdf
--- /dev/null
+++ b/docker/utils/json_stream.py
@@ -0,0 +1,80 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import json.decoder
+
+import six
+
+from ..errors import StreamParseError
+
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """
+ Given a stream of bytes or text, if any of the items in the stream
+ are bytes convert them to text.
+ This function can be removed once we return text streams
+ instead of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, six.text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(six.text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = six.text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e)
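+
+ # Example (illustrative sketch): reassembling JSON objects from an
+ # arbitrarily chunked byte stream, as returned by the Engine API:
+ #
+ #     chunks = [b'{"stream": "Step 1/2"}\n{"str', b'eam": "Step 2/2"}\n']
+ #     list(json_stream(chunks))
+ #     # -> [{'stream': 'Step 1/2'}, {'stream': 'Step 2/2'}]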
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
new file mode 100644
index 0000000..bf7d697
--- /dev/null
+++ b/docker/utils/ports.py
@@ -0,0 +1,83 @@
+import re
+
+ PORT_SPEC = re.compile(
+ "^" # Match full string
+ "(" # External part
+ r"((?P<host>[a-fA-F\d.:]+):)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ ")?"
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp))?" # Protocol
+ "$" # Match full string
+ )
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def _raise_invalid_port(port):
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]' % port)
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+ if not start:
+ return start
+ if not end:
+ return [start + proto]
+ if randomly_available_port:
+ return ['{}-{}'.format(start, end) + proto]
+ return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
+ port = str(port)
+ match = PORT_SPEC.match(port)
+ if match is None:
+ _raise_invalid_port(port)
+ parts = match.groupdict()
+
+ host = parts['host']
+ proto = parts['proto'] or ''
+ internal = port_range(parts['int'], parts['int_end'], proto)
+ external = port_range(
+ parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+ if host is None:
+ if external is not None and len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, external
+ else:
+ if not external:
+ external = [None] * len(internal)
+ elif len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, [(host, ext_port) for ext_port in external]
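+
+ # Example (illustrative sketch): the three common forms of a port spec:
+ #
+ #     split_port('80')                 # (['80'], None)
+ #     split_port('8080:80/tcp')        # (['80/tcp'], ['8080'])
+ #     split_port('127.0.0.1:8080:80')  # (['80'], [('127.0.0.1', '8080')])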
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
new file mode 100644
index 0000000..7b96d4f
--- /dev/null
+++ b/docker/utils/socket.py
@@ -0,0 +1,100 @@
+import errno
+import os
+import select
+import socket as pysocket
+import struct
+
+import six
+
+try:
+ from ..transport import NpipeSocket
+except ImportError:
+ NpipeSocket = type(None)
+
+
+class SocketError(Exception):
+ pass
+
+
+def read(socket, n=4096):
+ """
+ Reads at most n bytes from socket
+ """
+
+ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
+
+ if six.PY3 and not isinstance(socket, NpipeSocket):
+ select.select([socket], [], [])
+
+ try:
+ if hasattr(socket, 'recv'):
+ return socket.recv(n)
+ if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ return socket.read(n)
+ return os.read(socket.fileno(), n)
+ except EnvironmentError as e:
+ if e.errno not in recoverable_errors:
+ raise
+
+
+def read_exactly(socket, n):
+ """
+ Reads exactly n bytes from socket
+ Raises SocketError if there isn't enough data
+ """
+ data = six.binary_type()
+ while len(data) < n:
+ next_data = read(socket, n - len(data))
+ if not next_data:
+ raise SocketError("Unexpected EOF")
+ data += next_data
+ return data
+
+
+def next_frame_size(socket):
+ """
+ Returns the size of the next frame of data waiting to be read from socket,
+ according to the protocol defined here:
+
+ https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
+ """
+ try:
+ data = read_exactly(socket, 8)
+ except SocketError:
+ return -1
+
+ _, actual = struct.unpack('>BxxxL', data)
+ return actual
+
+
+def frames_iter(socket):
+ """
+ Returns a generator of frames read from socket
+ """
+ while True:
+ n = next_frame_size(socket)
+ if n < 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ if result is None:
+ continue
+ data_length = len(result)
+ if data_length == 0:
+ # We have reached EOF
+ return
+ n -= data_length
+ yield result
+
+
+def socket_raw_iter(socket):
+ """
+ Returns a generator of data read from the socket.
+ This is used for non-multiplexed streams.
+ """
+ while True:
+ result = read(socket)
+ if len(result) == 0:
+ # We have reached EOF
+ return
+ yield result
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
new file mode 100644
index 0000000..fe3b9a5
--- /dev/null
+++ b/docker/utils/utils.py
@@ -0,0 +1,489 @@
+import base64
+import os
+import os.path
+import json
+import shlex
+from distutils.version import StrictVersion
+from datetime import datetime
+
+import six
+
+from .. import errors
+from .. import tls
+
+if six.PY2:
+ from urllib import splitnport
+else:
+ from urllib.parse import splitnport
+
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+
+def create_ipam_pool(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_pool has been removed. Please use a '
+ 'docker.types.IPAMPool object instead.'
+ )
+
+
+def create_ipam_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_config has been removed. Please use a '
+ 'docker.types.IPAMConfig object instead.'
+ )
+
+
+def decode_json_header(header):
+ data = base64.b64decode(header)
+ if six.PY3:
+ data = data.decode('utf-8')
+ return json.loads(data)
+
+
+def compare_version(v1, v2):
+ """Compare docker versions
+
+ >>> v1 = '1.9'
+ >>> v2 = '1.10'
+ >>> compare_version(v1, v2)
+ 1
+ >>> compare_version(v2, v1)
+ -1
+ >>> compare_version(v2, v2)
+ 0
+ """
+ s1 = StrictVersion(v1)
+ s2 = StrictVersion(v2)
+ if s1 == s2:
+ return 0
+ elif s1 > s2:
+ return -1
+ else:
+ return 1
+
+
+def version_lt(v1, v2):
+ return compare_version(v1, v2) > 0
+
+
+def version_gte(v1, v2):
+ return not version_lt(v1, v2)
+
+
+def _convert_port_binding(binding):
+ result = {'HostIp': '', 'HostPort': ''}
+ if isinstance(binding, tuple):
+ if len(binding) == 2:
+ result['HostPort'] = binding[1]
+ result['HostIp'] = binding[0]
+ elif isinstance(binding[0], six.string_types):
+ result['HostIp'] = binding[0]
+ else:
+ result['HostPort'] = binding[0]
+ elif isinstance(binding, dict):
+ if 'HostPort' in binding:
+ result['HostPort'] = binding['HostPort']
+ if 'HostIp' in binding:
+ result['HostIp'] = binding['HostIp']
+ else:
+ raise ValueError(binding)
+ else:
+ result['HostPort'] = binding
+
+ if result['HostPort'] is None:
+ result['HostPort'] = ''
+ else:
+ result['HostPort'] = str(result['HostPort'])
+
+ return result
+
+
+def convert_port_bindings(port_bindings):
+ result = {}
+ for k, v in six.iteritems(port_bindings):
+ key = str(k)
+ if '/' not in key:
+ key += '/tcp'
+ if isinstance(v, list):
+ result[key] = [_convert_port_binding(binding) for binding in v]
+ else:
+ result[key] = [_convert_port_binding(v)]
+ return result
+
+
+def convert_volume_binds(binds):
+ if isinstance(binds, list):
+ return binds
+
+ result = []
+ for k, v in binds.items():
+ if isinstance(k, six.binary_type):
+ k = k.decode('utf-8')
+
+ if isinstance(v, dict):
+ if 'ro' in v and 'mode' in v:
+ raise ValueError(
+ 'Binding cannot contain both "ro" and "mode": {}'
+ .format(repr(v))
+ )
+
+ bind = v['bind']
+ if isinstance(bind, six.binary_type):
+ bind = bind.decode('utf-8')
+
+ if 'ro' in v:
+ mode = 'ro' if v['ro'] else 'rw'
+ elif 'mode' in v:
+ mode = v['mode']
+ else:
+ mode = 'rw'
+
+ result.append(
+ six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+ )
+ else:
+ if isinstance(v, six.binary_type):
+ v = v.decode('utf-8')
+ result.append(
+ six.text_type('{0}:{1}:rw').format(k, v)
+ )
+ return result
+
+
+def convert_tmpfs_mounts(tmpfs):
+ if isinstance(tmpfs, dict):
+ return tmpfs
+
+ if not isinstance(tmpfs, list):
+ raise ValueError(
+ 'Expected tmpfs value to be either a list or a dict, found: {}'
+ .format(type(tmpfs).__name__)
+ )
+
+ result = {}
+ for mount in tmpfs:
+ if isinstance(mount, six.string_types):
+ if ":" in mount:
+ name, options = mount.split(":", 1)
+ else:
+ name = mount
+ options = ""
+
+ else:
+ raise ValueError(
+ "Expected item in tmpfs list to be a string, found: {}"
+ .format(type(mount).__name__)
+ )
+
+ result[name] = options
+ return result
+
+
+def convert_service_networks(networks):
+ if not networks:
+ return networks
+ if not isinstance(networks, list):
+ raise TypeError('networks parameter must be a list.')
+
+ result = []
+ for n in networks:
+ if isinstance(n, six.string_types):
+ n = {'Target': n}
+ result.append(n)
+ return result
+
+
+def parse_repository_tag(repo_name):
+ parts = repo_name.rsplit('@', 1)
+ if len(parts) == 2:
+ return tuple(parts)
+ parts = repo_name.rsplit(':', 1)
+ if len(parts) == 2 and '/' not in parts[1]:
+ return tuple(parts)
+ return repo_name, None
+
+
+# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
+# fd:// protocol unsupported (for obvious reasons)
+# Added support for http and https
+# Protocol translation: tcp -> http, unix -> http+unix
+def parse_host(addr, is_win32=False, tls=False):
+ proto = "http+unix"
+ port = None
+ path = ''
+
+ if not addr and is_win32:
+ addr = DEFAULT_NPIPE
+
+ if not addr or addr.strip() == 'unix://':
+ return DEFAULT_UNIX_SOCKET
+
+ addr = addr.strip()
+ if addr.startswith('http://'):
+ addr = addr.replace('http://', 'tcp://')
+ if addr.startswith('http+unix://'):
+ addr = addr.replace('http+unix://', 'unix://')
+
+ if addr == 'tcp://':
+ raise errors.DockerException(
+ "Invalid bind address format: {0}".format(addr)
+ )
+ elif addr.startswith('unix://'):
+ addr = addr[7:]
+ elif addr.startswith('tcp://'):
+ proto = 'http{0}'.format('s' if tls else '')
+ addr = addr[6:]
+ elif addr.startswith('https://'):
+ proto = "https"
+ addr = addr[8:]
+ elif addr.startswith('npipe://'):
+ proto = 'npipe'
+ addr = addr[8:]
+ elif addr.startswith('fd://'):
+ raise errors.DockerException("fd protocol is not implemented")
+ else:
+ if "://" in addr:
+ raise errors.DockerException(
+ "Invalid bind address protocol: {0}".format(addr)
+ )
+ proto = "https" if tls else "http"
+
+ if proto in ("http", "https"):
+ address_parts = addr.split('/', 1)
+ host = address_parts[0]
+ if len(address_parts) == 2:
+ path = '/' + address_parts[1]
+ host, port = splitnport(host)
+
+ if port is None:
+ raise errors.DockerException(
+ "Invalid port: {0}".format(addr)
+ )
+
+ if not host:
+ host = DEFAULT_HTTP_HOST
+ else:
+ host = addr
+
+ if proto in ("http", "https") and port == -1:
+ raise errors.DockerException(
+ "Bind address needs a port: {0}".format(addr))
+
+ if proto == "http+unix" or proto == 'npipe':
+ return "{0}://{1}".format(proto, host).rstrip('/')
+ return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
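+
+ # Example (illustrative sketch): normalizing common DOCKER_HOST values:
+ #
+ #     parse_host(None)                       # 'http+unix://var/run/docker.sock'
+ #     parse_host('tcp://192.168.1.10:2375')  # 'http://192.168.1.10:2375'
+ #     parse_host('tcp://192.168.1.10:2376',
+ #                tls=True)                   # 'https://192.168.1.10:2376'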
+
+
+def parse_devices(devices):
+ device_list = []
+ for device in devices:
+ if isinstance(device, dict):
+ device_list.append(device)
+ continue
+ if not isinstance(device, six.string_types):
+ raise errors.DockerException(
+ 'Invalid device type {0}'.format(type(device))
+ )
+ device_mapping = device.split(':')
+ if device_mapping:
+ path_on_host = device_mapping[0]
+ if len(device_mapping) > 1:
+ path_in_container = device_mapping[1]
+ else:
+ path_in_container = path_on_host
+ if len(device_mapping) > 2:
+ permissions = device_mapping[2]
+ else:
+ permissions = 'rwm'
+ device_list.append({
+ 'PathOnHost': path_on_host,
+ 'PathInContainer': path_in_container,
+ 'CgroupPermissions': permissions
+ })
+ return device_list
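+
+ # Example (illustrative sketch): expanding a device string into the
+ # mapping the Engine API expects:
+ #
+ #     parse_devices(['/dev/sda:/dev/xvda:rwm'])
+ #     # -> [{'PathOnHost': '/dev/sda',
+ #     #      'PathInContainer': '/dev/xvda',
+ #     #      'CgroupPermissions': 'rwm'}]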
+
+
+def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
+ if not environment:
+ environment = os.environ
+ host = environment.get('DOCKER_HOST')
+
+ # empty string for cert path is the same as unset.
+ cert_path = environment.get('DOCKER_CERT_PATH') or None
+
+ # DOCKER_TLS_VERIFY unset or set to the empty string counts as
+ # "false"; any other value counts as "true".
+ tls_verify = environment.get('DOCKER_TLS_VERIFY')
+ if tls_verify == '':
+ tls_verify = False
+ else:
+ tls_verify = tls_verify is not None
+ enable_tls = cert_path or tls_verify
+
+ params = {}
+
+ if host:
+ params['base_url'] = (
+ host.replace('tcp://', 'https://') if enable_tls else host
+ )
+
+ if not enable_tls:
+ return params
+
+ if not cert_path:
+ cert_path = os.path.join(os.path.expanduser('~'), '.docker')
+
+ if not tls_verify and assert_hostname is None:
+ # assert_hostname is a subset of TLS verification,
+ # so if it's not set already then set it to false.
+ assert_hostname = False
+
+ params['tls'] = tls.TLSConfig(
+ client_cert=(os.path.join(cert_path, 'cert.pem'),
+ os.path.join(cert_path, 'key.pem')),
+ ca_cert=os.path.join(cert_path, 'ca.pem'),
+ verify=tls_verify,
+ ssl_version=ssl_version,
+ assert_hostname=assert_hostname,
+ )
+
+ return params
+
+
+def convert_filters(filters):
+ result = {}
+ for k, v in six.iteritems(filters):
+ if isinstance(v, bool):
+ v = 'true' if v else 'false'
+ if not isinstance(v, list):
+ v = [v, ]
+ result[k] = v
+ return json.dumps(result)
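+
+ # Example (illustrative sketch): filters are JSON-encoded for the query
+ # string; booleans become 'true'/'false' and scalars become lists:
+ #
+ #     convert_filters({'dangling': True})
+ #     # -> '{"dangling": ["true"]}'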
+
+
+def datetime_to_timestamp(dt):
+ """Convert a UTC datetime to a Unix timestamp"""
+ delta = dt - datetime.utcfromtimestamp(0)
+ return delta.seconds + delta.days * 24 * 3600
+
+
+def parse_bytes(s):
+ if isinstance(s, six.integer_types + (float,)):
+ return s
+ if len(s) == 0:
+ return 0
+
+ if s[-2:-1].isalpha() and s[-1].isalpha():
+ if s[-1] == "b" or s[-1] == "B":
+ s = s[:-1]
+ units = BYTE_UNITS
+ suffix = s[-1].lower()
+
+ # Check if the variable is a string representation of an int
+ # without a units part. Assuming that the units are bytes.
+ if suffix.isdigit():
+ digits_part = s
+ suffix = 'b'
+ else:
+ digits_part = s[:-1]
+
+ if suffix in units.keys() or suffix.isdigit():
+ try:
+ digits = int(digits_part)
+ except ValueError:
+ raise errors.DockerException(
+ 'Failed converting the string value for memory ({0}) to'
+ ' an integer.'.format(digits_part)
+ )
+
+ # Convert to an integer byte count for the final result
+ s = int(digits * units[suffix])
+ else:
+ raise errors.DockerException(
+ 'The specified value for memory ({0}) should specify the'
+ ' units. The postfix should be one of the `b` `k` `m` `g`'
+ ' characters'.format(s)
+ )
+
+ return s
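+
+ # Example (illustrative sketch): human-readable sizes resolve to byte
+ # counts, and a bare numeric string is taken to already be in bytes:
+ #
+ #     parse_bytes('512m')  # 536870912
+ #     parse_bytes('1g')    # 1073741824
+ #     parse_bytes('128')   # 128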
+
+
+def normalize_links(links):
+ if isinstance(links, dict):
+ links = six.iteritems(links)
+
+ return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
+
+
+def parse_env_file(env_file):
+ """
+ Reads a line-separated environment file.
+ The format of each line should be "key=value".
+ """
+ environment = {}
+
+ with open(env_file, 'r') as f:
+ for line in f:
+
+ if line[0] == '#':
+ continue
+
+ line = line.strip()
+ if not line:
+ continue
+
+ parse_line = line.split('=', 1)
+ if len(parse_line) == 2:
+ k, v = parse_line
+ environment[k] = v
+ else:
+ raise errors.DockerException(
+ 'Invalid line in environment file {0}:\n{1}'.format(
+ env_file, line))
+
+ return environment
+
+
+def split_command(command):
+ if six.PY2 and not isinstance(command, six.binary_type):
+ command = command.encode('utf-8')
+ return shlex.split(command)
+
+
+def format_environment(environment):
+ def format_env(key, value):
+ if value is None:
+ return key
+ if isinstance(value, six.binary_type):
+ value = value.decode('utf-8')
+
+ return u'{key}={value}'.format(key=key, value=value)
+ return [format_env(*var) for var in six.iteritems(environment)]
+
+
+def format_extra_hosts(extra_hosts, task=False):
+ # Use format dictated by Swarm API if container is part of a task
+ if task:
+ return [
+ '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ ]
+
+ return [
+ '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ ]
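+
+ # Example (illustrative sketch): containers use colon-separated
+ # host:ip pairs, while Swarm tasks expect 'ip host' lines:
+ #
+ #     format_extra_hosts({'db.local': '10.0.0.2'})
+ #     # -> ['db.local:10.0.0.2']
+ #     format_extra_hosts({'db.local': '10.0.0.2'}, task=True)
+ #     # -> ['10.0.0.2 db.local']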
+
+
+def create_host_config(self, *args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_host_config has been removed. Please use a '
+ 'docker.types.HostConfig object instead.'
+ )
diff --git a/docker/version.py b/docker/version.py
new file mode 100644
index 0000000..d451374
--- /dev/null
+++ b/docker/version.py
@@ -0,0 +1,2 @@
+version = "3.4.1"
+version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..6c5e7d0
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,18 @@
+appdirs==1.4.3
+asn1crypto==0.22.0
+backports.ssl-match-hostname==3.5.0.1
+cffi==1.10.0
+cryptography==1.9
+docker-pycreds==0.3.0
+enum34==1.1.6
+idna==2.5
+ipaddress==1.0.18
+packaging==16.8
+pycparser==2.17
+pyOpenSSL==17.0.0
+pyparsing==2.2.0
+pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
+pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
+requests==2.14.2
+six==1.10.0
+websocket-client==0.40.0
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..95b126b
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,11 @@
+[bdist_wheel]
+universal = 1
+
+[metadata]
+description_file = README.rst
+license = Apache License 2.0
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..57b2b5a
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+from __future__ import print_function
+
+import codecs
+import os
+
+from setuptools import setup, find_packages
+
+ROOT_DIR = os.path.dirname(__file__)
+SOURCE_DIR = os.path.join(ROOT_DIR)
+
+requirements = [
+ 'requests >= 2.14.2, != 2.18.0',
+ 'six >= 1.4.0',
+ 'websocket-client >= 0.32.0',
+ 'docker-pycreds >= 0.3.0'
+]
+
+extras_require = {
+ ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
+ # While not imported explicitly, the ipaddress module is required for
+ # ssl_match_hostname to verify hosts match with certificates via
+ # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
+ ':python_version < "3.3"': 'ipaddress >= 1.0.16',
+
+ # win32 APIs if on Windows (required for npipe support)
+ # Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported
+ # on v220 ; ALL versions are broken for v222 (as of 2018-01-26)
+ ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219',
+ ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==220',
+
+ # If using docker-py over TLS, highly recommend this option is
+ # pip-installed or pinned.
+
+ # TODO: if pip installing both "requests" and "requests[security]", the
+ # extra package from the "security" option are not installed (see
+ # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
+ # installing the extra dependencies, install the following instead:
+ # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
+ 'tls': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+}
+
+version = None
+exec(open('docker/version.py').read())
+
+with open('./test-requirements.txt') as test_reqs_txt:
+ test_requirements = [line for line in test_reqs_txt]
+
+
+long_description = ''
+try:
+ with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
+ long_description = readme_rst.read()
+except IOError:
+ # README.rst is only generated on release. Its absence should not prevent
+ # setup.py from working properly.
+ pass
+
+setup(
+ name="docker",
+ version=version,
+ description="A Python library for the Docker Engine API.",
+ long_description=long_description,
+ url='https://github.com/docker/docker-py',
+ packages=find_packages(exclude=["tests.*", "tests"]),
+ install_requires=requirements,
+ tests_require=test_requirements,
+ extras_require=extras_require,
+ zip_safe=False,
+ test_suite='tests',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Other Environment',
+ 'Intended Audience :: Developers',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Topic :: Utilities',
+ 'License :: OSI Approved :: Apache Software License',
+ ],
+ maintainer='Joffrey F',
+ maintainer_email='joffrey@docker.com',
+)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..09680b6
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,6 @@
+coverage==3.7.1
+flake8==3.4.1
+mock==1.0.1
+pytest==2.9.1
+pytest-cov==2.1.0
+pytest-timeout==1.2.1
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 0000000..b36d6d7
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,138 @@
+import functools
+import os
+import os.path
+import random
+import tarfile
+import tempfile
+import time
+import re
+import six
+import socket
+
+import docker
+import pytest
+
+
+def make_tree(dirs, files):
+ base = tempfile.mkdtemp()
+
+ for path in dirs:
+ os.makedirs(os.path.join(base, path))
+
+ for path in files:
+ with open(os.path.join(base, path), 'w') as f:
+ f.write("content")
+
+ return base
+
+
+def simple_tar(path):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+
+ abs_path = os.path.abspath(path)
+ t.add(abs_path, arcname=os.path.basename(path), recursive=False)
+
+ t.close()
+ f.seek(0)
+ return f
+
+
+def untar_file(tardata, filename):
+ with tarfile.open(mode='r', fileobj=tardata) as t:
+ f = t.extractfile(filename)
+ result = f.read()
+ f.close()
+ return result
+
+
+def requires_api_version(version):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
+ return pytest.mark.skipif(
+ docker.utils.version_lt(test_version, version),
+ reason="API version is too low (< {0})".format(version)
+ )
+
+
+def requires_experimental(until=None):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
+ def req_exp(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if not self.client.info()['ExperimentalBuild']:
+ pytest.skip('Feature requires Docker Engine experimental mode')
+ return f(self, *args, **kwargs)
+
+ if until and docker.utils.version_gte(test_version, until):
+ return f
+ return wrapped
+
+ return req_exp
+
+
+def wait_on_condition(condition, delay=0.1, timeout=40):
+ start_time = time.time()
+ while not condition():
+ if time.time() - start_time > timeout:
+ raise AssertionError("Timeout: %s" % condition)
+ time.sleep(delay)
+
+
+def random_name():
+ return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+
+def force_leave_swarm(client):
+ """Actually force leave a Swarm. There seems to be a bug in Swarm that
+ occasionally throws "context deadline exceeded" errors when leaving."""
+ while True:
+ try:
+ if isinstance(client, docker.DockerClient):
+ return client.swarm.leave(force=True)
+ return client.leave_swarm(force=True) # elif APIClient
+ except docker.errors.APIError as e:
+ if e.explanation == "context deadline exceeded":
+ continue
+ else:
+ return
+
+
+def swarm_listen_addr():
+ return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
+
+
+def assert_cat_socket_detached_with_keys(sock, inputs):
+ if six.PY3 and hasattr(sock, '_sock'):
+ sock = sock._sock
+
+ for i in inputs:
+ sock.sendall(i)
+ time.sleep(0.5)
+
+ # If we're using a Unix socket, the sock.send call will fail with a
+ # BrokenPipeError ; INET sockets will just stop receiving / sending data
+ # but will not raise an error
+ if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
+ with pytest.raises(socket.error):
+ sock.sendall(b'make sure the socket is closed\n')
+ else:
+ sock.sendall(b"make sure the socket is closed\n")
+ data = sock.recv(128)
+ # New in 18.06: error message is broadcast over the socket when reading
+ # after detach
+ assert data == b'' or data.startswith(
+ b'exec attach failed: error on attach stdin: read escape sequence'
+ )
+
+
+def ctrl_with(char):
+ if re.match('[a-z]', char):
+ return chr(ord(char) - ord('a') + 1).encode('ascii')
+ else:
+ raise Exception('char must be [a-z]')
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/integration/__init__.py
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
new file mode 100644
index 0000000..baaf33e
--- /dev/null
+++ b/tests/integration/api_build_test.py
@@ -0,0 +1,546 @@
+import io
+import os
+import shutil
+import tempfile
+
+from docker import errors
+
+import pytest
+import six
+
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import random_name, requires_api_version, requires_experimental
+
+
+class BuildTest(BaseAPIIntegrationTest):
+ def test_build_streaming(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+ stream = self.client.build(fileobj=script, decode=True)
+ logs = []
+ for chunk in stream:
+ logs.append(chunk)
+ assert len(logs) > 0
+
+ def test_build_from_stringio(self):
+ if six.PY3:
+ return
+ script = io.StringIO(six.text_type('\n').join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]))
+ stream = self.client.build(fileobj=script)
+ logs = ''
+ for chunk in stream:
+ if six.PY3:
+ chunk = chunk.decode('utf-8')
+ logs += chunk
+ assert logs != ''
+
+ def test_build_with_dockerignore(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write("\n".join([
+ 'ignored',
+ 'Dockerfile',
+ '.dockerignore',
+ '!ignored/subdir/excepted-file',
+ '', # empty line
+ '#*', # comment line
+ ]))
+
+ with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
+ f.write('this file should not be ignored')
+
+ subdir = os.path.join(base_dir, 'ignored', 'subdir')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'file'), 'w') as f:
+ f.write("this file should be ignored")
+
+ with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
+ f.write("this file should not be ignored")
+
+ tag = 'docker-py-test-build-with-dockerignore'
+ stream = self.client.build(
+ path=base_dir,
+ tag=tag,
+ )
+ for chunk in stream:
+ pass
+
+ c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
+ self.client.start(c)
+ self.client.wait(c)
+ logs = self.client.logs(c)
+
+ if six.PY3:
+ logs = logs.decode('utf-8')
+
+ assert sorted(list(filter(None, logs.split('\n')))) == sorted([
+ '/test/#file.txt',
+ '/test/ignored/subdir/excepted-file',
+ '/test/not-ignored'
+ ])
+
+ def test_build_with_buildargs(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'ARG test',
+ 'USER $test'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
+ )
+ self.tmp_imgs.append('buildargs')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('buildargs')
+ assert info['Config']['User'] == 'OK'
+
+ @requires_api_version('1.22')
+ def test_build_shmsize(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Hello, World!\'"',
+ ]).encode('ascii'))
+
+ tag = 'shmsize'
+ shmsize = 134217728
+
+ stream = self.client.build(
+ fileobj=script, tag=tag, shmsize=shmsize
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ # There is currently no way to get the shmsize
+ # that was used to build the image
+
+ @requires_api_version('1.24')
+ def test_build_isolation(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Deaf To All But The Song\'"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag='isolation',
+ isolation='default'
+ )
+
+ for chunk in stream:
+ pass
+
+ @requires_api_version('1.23')
+ def test_build_labels(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ ]).encode('ascii'))
+
+ labels = {'test': 'OK'}
+
+ stream = self.client.build(
+ fileobj=script, tag='labels', labels=labels
+ )
+ self.tmp_imgs.append('labels')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('labels')
+ assert info['Config']['Labels'] == labels
+
+ @requires_api_version('1.25')
+ def test_build_with_cache_from(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'ENV FOO=bar',
+ 'RUN touch baz',
+ 'RUN touch bax',
+ ]).encode('ascii'))
+
+ stream = self.client.build(fileobj=script, tag='build1')
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['build1'],
+ decode=True
+ )
+ self.tmp_imgs.append('build2')
+ counter = 0
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 3
+ self.client.remove_image('build2')
+
+ counter = 0
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['nosuchtag'],
+ decode=True
+ )
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 0
+
+ @requires_api_version('1.29')
+ def test_build_container_with_target(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox as first',
+ 'RUN mkdir -p /tmp/test',
+ 'RUN touch /tmp/silence.tar.gz',
+ 'FROM alpine:latest',
+ 'WORKDIR /root/',
+ 'COPY --from=first /tmp/silence.tar.gz .',
+ 'ONBUILD RUN echo "This should not be in the final image"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, target='first', tag='build1'
+ )
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('build1')
+ assert not info['Config']['OnBuild']
+
+ @requires_api_version('1.25')
+ def test_build_with_network_mode(self):
+ # Set up pingable endpoint on custom network
+ network = self.client.create_network(random_name())['Id']
+ self.tmp_networks.append(network)
+ container = self.client.create_container(BUSYBOX, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.connect_container_to_network(
+ container, network, aliases=['pingtarget.docker']
+ )
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 pingtarget.docker'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, network_mode=network,
+ tag='dockerpytest_customnetbuild'
+ )
+
+ self.tmp_imgs.append('dockerpytest_customnetbuild')
+ for chunk in stream:
+ pass
+
+ assert self.client.inspect_image('dockerpytest_customnetbuild')
+
+ script.seek(0)
+ stream = self.client.build(
+ fileobj=script, network_mode='none',
+ tag='dockerpytest_nonebuild', nocache=True, decode=True
+ )
+
+ self.tmp_imgs.append('dockerpytest_nonebuild')
+ logs = [chunk for chunk in stream]
+ assert 'errorDetail' in logs[-1]
+ assert logs[-1]['errorDetail']['code'] == 1
+
+ with pytest.raises(errors.NotFound):
+ self.client.inspect_image('dockerpytest_nonebuild')
+
+ @requires_api_version('1.27')
+ def test_build_with_extra_hosts(self):
+ img_name = 'dockerpytest_extrahost_build'
+ self.tmp_imgs.append(img_name)
+
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 hello.world.test',
+ 'RUN ping -c1 extrahost.local.test',
+ 'RUN cp /etc/hosts /hosts-file'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, tag=img_name,
+ extra_hosts={
+ 'extrahost.local.test': '127.0.0.1',
+ 'hello.world.test': '127.0.0.1',
+ }, decode=True
+ )
+ for chunk in stream:
+ if 'errorDetail' in chunk:
+ pytest.fail(chunk)
+
+ assert self.client.inspect_image(img_name)
+ ctnr = self.run_container(img_name, 'cat /hosts-file')
+ self.tmp_containers.append(ctnr)
+ logs = self.client.logs(ctnr)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert '127.0.0.1\textrahost.local.test' in logs
+ assert '127.0.0.1\thello.world.test' in logs
+
+ @requires_experimental(until=None)
+ @requires_api_version('1.25')
+ def test_build_squash(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN echo blah > /file_1',
+ 'RUN echo blahblah > /file_2',
+ 'RUN echo blahblahblah > /file_3'
+ ]).encode('ascii'))
+
+ def build_squashed(squash):
+ tag = 'squash' if squash else 'nosquash'
+ stream = self.client.build(
+ fileobj=script, tag=tag, squash=squash
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ return self.client.inspect_image(tag)
+
+ non_squashed = build_squashed(False)
+ squashed = build_squashed(True)
+ assert len(non_squashed['RootFS']['Layers']) == 4
+ assert len(squashed['RootFS']['Layers']) == 2
+
+ def test_build_stderr_data(self):
+ control_chars = ['\x1b[91m', '\x1b[0m']
+ snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
+ script = io.BytesIO(b'\n'.join([
+ b'FROM busybox',
+ 'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
+ ]))
+
+ stream = self.client.build(
+ fileobj=script, decode=True, nocache=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk.get('stream'))
+ expected = '{0}{2}\n{1}'.format(
+ control_chars[0], control_chars[1], snippet
+ )
+ assert any([line == expected for line in lines])
+
+ def test_build_gzip_encoding(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("\n".join([
+ 'FROM busybox',
+ 'ADD . /test',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True,
+ gzip=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_with_dockerfile_empty_lines(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('\n'.join([
+ ' ',
+ '',
+ '\t\t',
+ '\t ',
+ ]))
+
+ stream = self.client.build(
+ path=base_dir, decode=True, nocache=True
+ )
+
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully built' in lines[-1]['stream']
+
+ def test_build_gzip_custom_encoding(self):
+ with pytest.raises(errors.DockerException):
+ self.client.build(path='.', gzip=True, encoding='text/html')
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_build_invalid_platform(self):
+ script = io.BytesIO('FROM busybox\n'.encode('ascii'))
+
+ with pytest.raises(errors.APIError) as excinfo:
+ stream = self.client.build(fileobj=script, platform='foobar')
+ for _ in stream:
+ pass
+
+ assert excinfo.value.status_code == 400
+ assert 'invalid platform' in excinfo.exconly()
+
+ def test_build_out_of_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+ f.write('.dockerignore\n')
+ df_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, df_dir)
+ df_name = os.path.join(df_dir, 'Dockerfile')
+ with open(df_name, 'wb') as df:
+ df.write(('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ])).encode('utf-8'))
+ df.flush()
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=df_name, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 3
+ assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
+
+ def test_build_in_context_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
+
+ def test_build_in_context_nested_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ subdir = os.path.join(base_dir, 'hello', 'world')
+ os.makedirs(subdir)
+ with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile='hello/world/custom.dockerfile',
+ tag=img_name, decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'hello']
+ ) == sorted(lsdata)
+
+ def test_build_in_context_abs_dockerfile(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile')
+ with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+ f.write('hello world')
+ with open(abs_dockerfile_path, 'w') as df:
+ df.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'WORKDIR /src',
+ ]))
+ img_name = random_name()
+ self.tmp_imgs.append(img_name)
+ stream = self.client.build(
+ path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name,
+ decode=True
+ )
+ lines = []
+ for chunk in stream:
+ lines.append(chunk)
+ assert 'Successfully tagged' in lines[-1]['stream']
+
+ ctnr = self.client.create_container(img_name, 'ls -a')
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ lsdata = self.client.logs(ctnr).strip().split(b'\n')
+ assert len(lsdata) == 4
+ assert sorted(
+ [b'.', b'..', b'file.txt', b'custom.dockerfile']
+ ) == sorted(lsdata)
+
+ @requires_api_version('1.31')
+ def test_prune_builds(self):
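+ # The response always includes 'SpaceReclaimed', even when the
+ # build cache held nothing to prune.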
+ prune_result = self.client.prune_builds()
+ assert 'SpaceReclaimed' in prune_result
+ assert isinstance(prune_result['SpaceReclaimed'], int)
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
new file mode 100644
index 0000000..905e064
--- /dev/null
+++ b/tests/integration/api_client_test.py
@@ -0,0 +1,77 @@
+import time
+import unittest
+import warnings
+
+import docker
+from docker.utils import kwargs_from_env
+
+from .base import BaseAPIIntegrationTest
+
+
+class InformationTest(BaseAPIIntegrationTest):
+ def test_version(self):
+ res = self.client.version()
+ assert 'GoVersion' in res
+ assert 'Version' in res
+
+ def test_info(self):
+ res = self.client.info()
+ assert 'Containers' in res
+ assert 'Images' in res
+ assert 'Debug' in res
+
+
+class AutoDetectVersionTest(unittest.TestCase):
+ def test_client_init(self):
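+ # version='auto' makes the client query the daemon and adopt its
+ # reported API version.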
+ client = docker.APIClient(version='auto', **kwargs_from_env())
+ client_version = client._version
+ api_version = client.version(api_version=False)['ApiVersion']
+ assert client_version == api_version
+ api_version_2 = client.version()['ApiVersion']
+ assert client_version == api_version_2
+ client.close()
+
+
+class ConnectionTimeoutTest(unittest.TestCase):
+ def setUp(self):
+ self.timeout = 0.5
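+ # 192.168.10.2:4243 is assumed to be unreachable from the test
+ # host; requests against it should abort once the timeout expires.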
+ self.client = docker.api.APIClient(
+ version=docker.constants.MINIMUM_DOCKER_API_VERSION,
+ base_url='http://192.168.10.2:4243',
+ timeout=self.timeout
+ )
+
+ def test_timeout(self):
+ start = time.time()
+ res = None
+ # This call isn't supposed to complete, and it should fail fast.
+ try:
+ res = self.client.inspect_container('id')
+ except Exception:
+ pass
+ end = time.time()
+ assert res is None
+ assert end - start < 2 * self.timeout
+
+
+class UnixconnTest(unittest.TestCase):
+ """
+ Test UNIX socket connection adapter.
+ """
+
+ def test_resource_warnings(self):
+ """
+ Test no warnings are produced when using the client.
+ """
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+
+ client = docker.APIClient(version='auto', **kwargs_from_env())
+ client.images()
+ client.close()
+ del client
+
+ assert len(w) == 0, "Unexpected warning produced: {0}".format(
+ w[0].message
+ )
diff --git a/tests/integration/api_config_test.py b/tests/integration/api_config_test.py
new file mode 100644
index 0000000..0ffd767
--- /dev/null
+++ b/tests/integration/api_config_test.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.30')
+class ConfigAPITest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
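+ # Configs are a swarm-scoped resource, so run the whole class
+ # inside a fresh single-node swarm.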
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def test_create_config(self):
+ config_id = self.client.create_config(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_config_unicode_data(self):
+ config_id = self.client.create_config(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_configs.append(config_id)
+ assert 'ID' in config_id
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_config(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+ data = self.client.inspect_config(config_id)
+ assert data['Spec']['Name'] == config_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_config(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+
+ assert self.client.remove_config(config_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_config(config_id)
+
+ def test_list_configs(self):
+ config_name = 'favorite_character'
+ config_id = self.client.create_config(
+ config_name, 'sakuya izayoi'
+ )
+ self.tmp_configs.append(config_id)
+
+ data = self.client.configs(filters={'name': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == config_id['ID']
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
new file mode 100644
index 0000000..ff70148
--- /dev/null
+++ b/tests/integration/api_container_test.py
@@ -0,0 +1,1513 @@
+import os
+import re
+import signal
+import tempfile
+import threading
+from datetime import datetime
+
+import docker
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
+
+import pytest
+
+import requests
+import six
+
+from .base import BUSYBOX, BaseAPIIntegrationTest
+from .. import helpers
+from ..helpers import (
+ requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
+)
+
+
+class ListContainersTest(BaseAPIIntegrationTest):
+ def test_list_containers(self):
+ res0 = self.client.containers(all=True)
+ size = len(res0)
+ res1 = self.client.create_container(BUSYBOX, 'true')
+ assert 'Id' in res1
+ self.client.start(res1['Id'])
+ self.tmp_containers.append(res1['Id'])
+ res2 = self.client.containers(all=True)
+ assert size + 1 == len(res2)
+ retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
+ assert len(retrieved) == 1
+ retrieved = retrieved[0]
+ assert 'Command' in retrieved
+ assert retrieved['Command'] == six.text_type('true')
+ assert 'Image' in retrieved
+ assert re.search(r'busybox:.*', retrieved['Image'])
+ assert 'Status' in retrieved
+
+
+class CreateContainerTest(BaseAPIIntegrationTest):
+
+ def test_create(self):
+ res = self.client.create_container(BUSYBOX, 'true')
+ assert 'Id' in res
+ self.tmp_containers.append(res['Id'])
+
+ def test_create_with_host_pid_mode(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true', host_config=self.client.create_host_config(
+ pid_mode='host', network_mode='none'
+ )
+ )
+ assert 'Id' in ctnr
+ self.tmp_containers.append(ctnr['Id'])
+ self.client.start(ctnr)
+ inspect = self.client.inspect_container(ctnr)
+ assert 'HostConfig' in inspect
+ host_config = inspect['HostConfig']
+ assert 'PidMode' in host_config
+ assert host_config['PidMode'] == 'host'
+
+ def test_create_with_links(self):
+ res0 = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True,
+ environment={'FOO': '1'})
+
+ container1_id = res0['Id']
+ self.tmp_containers.append(container1_id)
+
+ self.client.start(container1_id)
+
+ res1 = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True,
+ environment={'FOO': '1'})
+
+ container2_id = res1['Id']
+ self.tmp_containers.append(container2_id)
+
+ self.client.start(container2_id)
+
+ # strip the leading '/' from the container name
+ link_path1 = self.client.inspect_container(container1_id)['Name'][1:]
+ link_alias1 = 'mylink1'
+ link_env_prefix1 = link_alias1.upper()
+
+ link_path2 = self.client.inspect_container(container2_id)['Name'][1:]
+ link_alias2 = 'mylink2'
+ link_env_prefix2 = link_alias2.upper()
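+ # Linked containers expose <ALIAS>_NAME and <ALIAS>_ENV_* variables
+ # in the environment of the container that links them.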
+
+ res2 = self.client.create_container(
+ BUSYBOX, 'env', host_config=self.client.create_host_config(
+ links={link_path1: link_alias1, link_path2: link_alias2},
+ network_mode='bridge'
+ )
+ )
+ container3_id = res2['Id']
+ self.tmp_containers.append(container3_id)
+ self.client.start(container3_id)
+ assert self.client.wait(container3_id)['StatusCode'] == 0
+
+ logs = self.client.logs(container3_id)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert '{0}_NAME='.format(link_env_prefix1) in logs
+ assert '{0}_ENV_FOO=1'.format(link_env_prefix1) in logs
+ assert '{0}_NAME='.format(link_env_prefix2) in logs
+ assert '{0}_ENV_FOO=1'.format(link_env_prefix2) in logs
+
+ def test_create_with_restart_policy(self):
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '2'],
+ host_config=self.client.create_host_config(
+ restart_policy={"Name": "always", "MaximumRetryCount": 0},
+ network_mode='none'
+ )
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.client.wait(id)
+ with pytest.raises(docker.errors.APIError) as exc:
+ self.client.remove_container(id)
+ err = exc.value.explanation
+ assert 'You cannot remove ' in err
+ self.client.remove_container(id, force=True)
+
+ def test_create_container_with_volumes_from(self):
+ vol_names = ['foobar_vol0', 'foobar_vol1']
+
+ res0 = self.client.create_container(
+ BUSYBOX, 'true', name=vol_names[0]
+ )
+ container1_id = res0['Id']
+ self.tmp_containers.append(container1_id)
+ self.client.start(container1_id)
+
+ res1 = self.client.create_container(
+ BUSYBOX, 'true', name=vol_names[1]
+ )
+ container2_id = res1['Id']
+ self.tmp_containers.append(container2_id)
+ self.client.start(container2_id)
+
+ res = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True,
+ host_config=self.client.create_host_config(
+ volumes_from=vol_names, network_mode='none'
+ )
+ )
+ container3_id = res['Id']
+ self.tmp_containers.append(container3_id)
+ self.client.start(container3_id)
+
+ info = self.client.inspect_container(res['Id'])
+ assert len(info['HostConfig']['VolumesFrom']) == len(vol_names)
+
+ def create_container_readonly_fs(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, ['mkdir', '/shrine'],
+ host_config=self.client.create_host_config(
+ read_only=True, network_mode='none'
+ )
+ )
+ assert 'Id' in ctnr
+ self.tmp_containers.append(ctnr['Id'])
+ self.client.start(ctnr)
+ res = self.client.wait(ctnr)['StatusCode']
+ assert res != 0
+
+ def create_container_with_name(self):
+ res = self.client.create_container(BUSYBOX, 'true', name='foobar')
+ assert 'Id' in res
+ self.tmp_containers.append(res['Id'])
+ inspect = self.client.inspect_container(res['Id'])
+ assert 'Name' in inspect
+ assert '/foobar' == inspect['Name']
+
+ def create_container_privileged(self):
+ res = self.client.create_container(
+ BUSYBOX, 'true', host_config=self.client.create_host_config(
+ privileged=True, network_mode='none'
+ )
+ )
+ assert 'Id' in res
+ self.tmp_containers.append(res['Id'])
+ self.client.start(res['Id'])
+ inspect = self.client.inspect_container(res['Id'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
+ if not inspect['State']['Running']:
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
+ # Since Nov 2013, the Privileged flag is no longer part of the
+ # container's config exposed via the API (likely for safety
+ # reasons), so only verify it when the daemon still reports it.
+ if 'Privileged' in inspect['Config']:
+ assert inspect['Config']['Privileged'] is True
+
+ def test_create_with_mac_address(self):
+ mac_address_expected = "02:42:ac:11:00:0a"
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected)
+
+ id = container['Id']
+
+ self.client.start(container)
+ res = self.client.inspect_container(container['Id'])
+ assert mac_address_expected == res['NetworkSettings']['MacAddress']
+
+ self.client.kill(id)
+
+ def test_group_id_ints(self):
+ container = self.client.create_container(
+ BUSYBOX, 'id -G',
+ host_config=self.client.create_host_config(group_add=[1000, 1001])
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.wait(container)
+
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ groups = logs.strip().split(' ')
+ assert '1000' in groups
+ assert '1001' in groups
+
+ def test_group_id_strings(self):
+ container = self.client.create_container(
+ BUSYBOX, 'id -G', host_config=self.client.create_host_config(
+ group_add=['1000', '1001']
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.wait(container)
+
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+
+ groups = logs.strip().split(' ')
+ assert '1000' in groups
+ assert '1001' in groups
+
+ def test_valid_log_driver_and_log_opt(self):
+ log_config = docker.types.LogConfig(
+ type='json-file',
+ config={'max-file': '100'}
+ )
+
+ container = self.client.create_container(
+ BUSYBOX, ['true'],
+ host_config=self.client.create_host_config(log_config=log_config)
+ )
+ self.tmp_containers.append(container['Id'])
+ self.client.start(container)
+
+ info = self.client.inspect_container(container)
+ container_log_config = info['HostConfig']['LogConfig']
+
+ assert container_log_config['Type'] == log_config.type
+ assert container_log_config['Config'] == log_config.config
+
+ def test_invalid_log_driver_raises_exception(self):
+ log_config = docker.types.LogConfig(
+ type='asdf-nope',
+ config={}
+ )
+
+ expected_msg = "logger: no log driver named 'asdf-nope' is registered"
+ with pytest.raises(docker.errors.APIError) as excinfo:
+ # raises an internal server error 500
+ container = self.client.create_container(
+ BUSYBOX, ['true'], host_config=self.client.create_host_config(
+ log_config=log_config
+ )
+ )
+ self.client.start(container)
+
+ assert excinfo.value.explanation == expected_msg
+
+ def test_valid_no_log_driver_specified(self):
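+ # An empty log driver type should fall back to the daemon's
+ # default driver (json-file on a stock daemon).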
+ log_config = docker.types.LogConfig(
+ type="",
+ config={'max-file': '100'}
+ )
+
+ container = self.client.create_container(
+ BUSYBOX, ['true'],
+ host_config=self.client.create_host_config(log_config=log_config)
+ )
+ self.tmp_containers.append(container['Id'])
+ self.client.start(container)
+
+ info = self.client.inspect_container(container)
+ container_log_config = info['HostConfig']['LogConfig']
+
+ assert container_log_config['Type'] == "json-file"
+ assert container_log_config['Config'] == log_config.config
+
+ def test_valid_no_config_specified(self):
+ log_config = docker.types.LogConfig(
+ type="json-file",
+ config=None
+ )
+
+ container = self.client.create_container(
+ BUSYBOX, ['true'],
+ host_config=self.client.create_host_config(log_config=log_config)
+ )
+ self.tmp_containers.append(container['Id'])
+ self.client.start(container)
+
+ info = self.client.inspect_container(container)
+ container_log_config = info['HostConfig']['LogConfig']
+
+ assert container_log_config['Type'] == "json-file"
+ assert container_log_config['Config'] == {}
+
+ def test_create_with_memory_constraints_with_str(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ memswap_limit='1G',
+ mem_limit='700M'
+ )
+ )
+ assert 'Id' in ctnr
+ self.tmp_containers.append(ctnr['Id'])
+ self.client.start(ctnr)
+ inspect = self.client.inspect_container(ctnr)
+
+ assert 'HostConfig' in inspect
+ host_config = inspect['HostConfig']
+ for limit in ['Memory', 'MemorySwap']:
+ assert limit in host_config
+
+ def test_create_with_memory_constraints_with_int(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(mem_swappiness=40)
+ )
+ assert 'Id' in ctnr
+ self.tmp_containers.append(ctnr['Id'])
+ self.client.start(ctnr)
+ inspect = self.client.inspect_container(ctnr)
+
+ assert 'HostConfig' in inspect
+ host_config = inspect['HostConfig']
+ assert 'MemorySwappiness' in host_config
+
+ def test_create_with_environment_variable_no_value(self):
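+ # A None value should serialize to a bare 'Foo' entry, while an
+ # empty string yields 'Blank='.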
+ container = self.client.create_container(
+ BUSYBOX,
+ ['echo'],
+ environment={'Foo': None, 'Other': 'one', 'Blank': ''},
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container['Id'])
+ assert (
+ sorted(config['Config']['Env']) ==
+ sorted(['Foo', 'Other=one', 'Blank='])
+ )
+
+ @requires_api_version('1.22')
+ def test_create_with_tmpfs(self):
+ tmpfs = {
+ '/tmp1': 'size=3M'
+ }
+
+ container = self.client.create_container(
+ BUSYBOX,
+ ['echo'],
+ host_config=self.client.create_host_config(
+ tmpfs=tmpfs))
+
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Tmpfs'] == tmpfs
+
+ @requires_api_version('1.24')
+ def test_create_with_isolation(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo'], host_config=self.client.create_host_config(
+ isolation='default'
+ )
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Isolation'] == 'default'
+
+ @requires_api_version('1.25')
+ def test_create_with_auto_remove(self):
+ host_config = self.client.create_host_config(
+ auto_remove=True
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['AutoRemove'] is True
+
+ @requires_api_version('1.25')
+ def test_create_with_stop_timeout(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], stop_timeout=25
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['Config']['StopTimeout'] == 25
+
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ def test_create_with_storage_opt(self):
+ host_config = self.client.create_host_config(
+ storage_opt={'size': '120G'}
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container)
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['StorageOpt'] == {
+ 'size': '120G'
+ }
+
+ @requires_api_version('1.25')
+ def test_create_with_init(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ init=True
+ )
+ )
+ self.tmp_containers.append(ctnr['Id'])
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['Init'] is True
+
+ @pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
+ @requires_api_version('1.25')
+ def test_create_with_init_path(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ init_path="/usr/libexec/docker-init"
+ )
+ )
+ self.tmp_containers.append(ctnr['Id'])
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
+
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu.rt_runtime_us'),
+ reason='CONFIG_RT_GROUP_SCHED isn\'t enabled')
+ def test_create_with_cpu_rt_options(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true', host_config=self.client.create_host_config(
+ cpu_rt_period=1000, cpu_rt_runtime=500
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['CpuRealtimeRuntime'] == 500
+ assert config['HostConfig']['CpuRealtimePeriod'] == 1000
+
+ @requires_api_version('1.28')
+ def test_create_with_device_cgroup_rules(self):
+ rule = 'c 7:128 rwm'
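+ # 'c 7:128 rwm' = char device, major 7, minor 128, with read,
+ # write and mknod access; it should appear in the devices cgroup.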
+ ctnr = self.client.create_container(
+ BUSYBOX, 'cat /sys/fs/cgroup/devices/devices.list',
+ host_config=self.client.create_host_config(
+ device_cgroup_rules=[rule]
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['DeviceCgroupRules'] == [rule]
+ self.client.start(ctnr)
+ assert rule in self.client.logs(ctnr).decode('utf-8')
+
+
+@pytest.mark.xfail(
+ IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
+)
+class VolumeBindTest(BaseAPIIntegrationTest):
+ def setUp(self):
+ super(VolumeBindTest, self).setUp()
+
+ self.mount_dest = '/mnt'
+
+ # Create a temporary directory on the host to use as the bind source
+ self.mount_origin = tempfile.mkdtemp()
+ self.filename = 'shared.txt'
+
+ self.run_with_volume(
+ False,
+ BUSYBOX,
+ ['touch', os.path.join(self.mount_dest, self.filename)],
+ )
+
+ def test_create_with_binds_rw(self):
+
+ container = self.run_with_volume(
+ False,
+ BUSYBOX,
+ ['ls', self.mount_dest],
+ )
+ logs = self.client.logs(container)
+
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, True)
+
+ def test_create_with_binds_ro(self):
+ self.run_with_volume(
+ False,
+ BUSYBOX,
+ ['touch', os.path.join(self.mount_dest, self.filename)],
+ )
+ container = self.run_with_volume(
+ True,
+ BUSYBOX,
+ ['ls', self.mount_dest],
+ )
+ logs = self.client.logs(container)
+
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
+
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, False)
+
+ @requires_api_version('1.30')
+ def test_create_with_mounts(self):
+ mount = docker.types.Mount(
+ type="bind", source=self.mount_origin, target=self.mount_dest
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.run_container(
+ BUSYBOX, ['ls', self.mount_dest],
+ host_config=host_config
+ )
+ assert container
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, True)
+
+ @requires_api_version('1.30')
+ def test_create_with_mounts_ro(self):
+ mount = docker.types.Mount(
+ type="bind", source=self.mount_origin, target=self.mount_dest,
+ read_only=True
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.run_container(
+ BUSYBOX, ['ls', self.mount_dest],
+ host_config=host_config
+ )
+ assert container
+ logs = self.client.logs(container)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert self.filename in logs
+ inspect_data = self.client.inspect_container(container)
+ self.check_container_data(inspect_data, False)
+
+ @requires_api_version('1.30')
+ def test_create_with_volume_mount(self):
+ mount = docker.types.Mount(
+ type="volume", source=helpers.random_name(),
+ target=self.mount_dest, labels={'com.dockerpy.test': 'true'}
+ )
+ host_config = self.client.create_host_config(mounts=[mount])
+ container = self.client.create_container(
+ BUSYBOX, ['true'], host_config=host_config,
+ )
+ assert container
+ inspect_data = self.client.inspect_container(container)
+ assert 'Mounts' in inspect_data
+ filtered = list(filter(
+ lambda x: x['Destination'] == self.mount_dest,
+ inspect_data['Mounts']
+ ))
+ assert len(filtered) == 1
+ mount_data = filtered[0]
+ assert mount['Source'] == mount_data['Name']
+ assert mount_data['RW'] is True
+
+ def check_container_data(self, inspect_data, rw):
+ assert 'Mounts' in inspect_data
+ filtered = list(filter(
+ lambda x: x['Destination'] == self.mount_dest,
+ inspect_data['Mounts']
+ ))
+ assert len(filtered) == 1
+ mount_data = filtered[0]
+ assert mount_data['Source'] == self.mount_origin
+ assert mount_data['RW'] == rw
+
+ def run_with_volume(self, ro, *args, **kwargs):
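+ # Helper: run a container with self.mount_origin bind-mounted at
+ # self.mount_dest, read-only when `ro` is True.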
+ return self.run_container(
+ *args,
+ volumes={self.mount_dest: {}},
+ host_config=self.client.create_host_config(
+ binds={
+ self.mount_origin: {
+ 'bind': self.mount_dest,
+ 'ro': ro,
+ },
+ },
+ network_mode='none'
+ ),
+ **kwargs
+ )
+
+
+class ArchiveTest(BaseAPIIntegrationTest):
+ def test_get_file_archive_from_container(self):
+ data = 'The Maid and the Pocket Watch of Blood'
+ ctnr = self.client.create_container(
+ BUSYBOX, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
+ volumes=['/vol1']
+ )
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ self.client.wait(ctnr)
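+ # get_archive yields a tar stream plus a stat dict; untar the
+ # stream to recover the file contents.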
+ with tempfile.NamedTemporaryFile() as destination:
+ strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
+ for d in strm:
+ destination.write(d)
+ destination.seek(0)
+ retrieved_data = helpers.untar_file(destination, 'data.txt')
+ if six.PY3:
+ retrieved_data = retrieved_data.decode('utf-8')
+ assert data == retrieved_data.strip()
+
+ def test_get_file_stat_from_container(self):
+ data = 'The Maid and the Pocket Watch of Blood'
+ ctnr = self.client.create_container(
+ BUSYBOX, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
+ volumes=['/vol1']
+ )
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ self.client.wait(ctnr)
+ strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
+ assert 'name' in stat
+ assert stat['name'] == 'data.txt'
+ assert 'size' in stat
+ assert stat['size'] == len(data)
+
+ def test_copy_file_to_container(self):
+ data = b'Deaf To All But The Song'
+ with tempfile.NamedTemporaryFile(delete=False) as test_file:
+ test_file.write(data)
+ test_file.seek(0)
+ ctnr = self.client.create_container(
+ BUSYBOX,
+ 'cat {0}'.format(
+ os.path.join('/vol1/', os.path.basename(test_file.name))
+ ),
+ volumes=['/vol1']
+ )
+ self.tmp_containers.append(ctnr)
+ with helpers.simple_tar(test_file.name) as test_tar:
+ self.client.put_archive(ctnr, '/vol1', test_tar)
+ self.client.start(ctnr)
+ self.client.wait(ctnr)
+ logs = self.client.logs(ctnr)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ data = data.decode('utf-8')
+ assert logs.strip() == data
+
+ def test_copy_directory_to_container(self):
+ files = ['a.py', 'b.py', 'foo/b.py']
+ dirs = ['foo', 'bar']
+ base = helpers.make_tree(dirs, files)
+ ctnr = self.client.create_container(
+ BUSYBOX, 'ls -p /vol1', volumes=['/vol1']
+ )
+ self.tmp_containers.append(ctnr)
+ with docker.utils.tar(base) as test_tar:
+ self.client.put_archive(ctnr, '/vol1', test_tar)
+ self.client.start(ctnr)
+ self.client.wait(ctnr)
+ logs = self.client.logs(ctnr)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ results = logs.strip().split()
+ assert 'a.py' in results
+ assert 'b.py' in results
+ assert 'foo/' in results
+ assert 'bar/' in results
+
+
+class RenameContainerTest(BaseAPIIntegrationTest):
+ def test_rename_container(self):
+ version = self.client.version()['Version']
+ name = 'hong_meiling'
+ res = self.client.create_container(BUSYBOX, 'true')
+ assert 'Id' in res
+ self.tmp_containers.append(res['Id'])
+ self.client.rename(res, name)
+ inspect = self.client.inspect_container(res['Id'])
+ assert 'Name' in inspect
+ if version == '1.5.0':
+ assert name == inspect['Name']
+ else:
+ assert '/{0}'.format(name) == inspect['Name']
+
+
+class StartContainerTest(BaseAPIIntegrationTest):
+ def test_start_container(self):
+ res = self.client.create_container(BUSYBOX, 'true')
+ assert 'Id' in res
+ self.tmp_containers.append(res['Id'])
+ self.client.start(res['Id'])
+ inspect = self.client.inspect_container(res['Id'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
+ if not inspect['State']['Running']:
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
+
+ def test_start_container_with_dict_instead_of_id(self):
+ res = self.client.create_container(BUSYBOX, 'true')
+ assert 'Id' in res
+ self.tmp_containers.append(res['Id'])
+ self.client.start(res)
+ inspect = self.client.inspect_container(res['Id'])
+ assert 'Config' in inspect
+ assert 'Id' in inspect
+ assert inspect['Id'].startswith(res['Id'])
+ assert 'Image' in inspect
+ assert 'State' in inspect
+ assert 'Running' in inspect['State']
+ if not inspect['State']['Running']:
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == 0
+
+ def test_run_shlex_commands(self):
+ commands = [
+ 'true',
+ 'echo "The Young Descendant of Tepes & Septette for the '
+ 'Dead Princess"',
+ 'echo -n "The Young Descendant of Tepes & Septette for the '
+ 'Dead Princess"',
+ '/bin/sh -c "echo Hello World"',
+ '/bin/sh -c \'echo "Hello World"\'',
+ 'echo "\"Night of Nights\""',
+ 'true && echo "Night of Nights"'
+ ]
+ for cmd in commands:
+ container = self.client.create_container(BUSYBOX, cmd)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0, cmd
+
+
+class WaitTest(BaseAPIIntegrationTest):
+ def test_wait(self):
+ res = self.client.create_container(BUSYBOX, ['sleep', '3'])
+ id = res['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ inspect = self.client.inspect_container(id)
+ assert 'Running' in inspect['State']
+ assert inspect['State']['Running'] is False
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == exitcode
+
+ def test_wait_with_dict_instead_of_id(self):
+ res = self.client.create_container(BUSYBOX, ['sleep', '3'])
+ id = res['Id']
+ self.tmp_containers.append(id)
+ self.client.start(res)
+ exitcode = self.client.wait(res)['StatusCode']
+ assert exitcode == 0
+ inspect = self.client.inspect_container(res)
+ assert 'Running' in inspect['State']
+ assert inspect['State']['Running'] is False
+ assert 'ExitCode' in inspect['State']
+ assert inspect['State']['ExitCode'] == exitcode
+
+ @requires_api_version('1.30')
+ def test_wait_with_condition(self):
+ ctnr = self.client.create_container(BUSYBOX, 'true')
+ self.tmp_containers.append(ctnr)
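+ # The container never starts, so waiting for condition='removed'
+ # should block until the 1-second request timeout fails the call.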
+ with pytest.raises(requests.exceptions.ConnectionError):
+ self.client.wait(ctnr, condition='removed', timeout=1)
+
+ ctnr = self.client.create_container(
+ BUSYBOX, ['sleep', '3'],
+ host_config=self.client.create_host_config(auto_remove=True)
+ )
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ assert self.client.wait(
+ ctnr, condition='removed', timeout=5
+ )['StatusCode'] == 0
+
+
+class LogsTest(BaseAPIIntegrationTest):
+ def test_logs(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo {0}'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ logs = self.client.logs(id)
+ assert logs == (snippet + '\n').encode(encoding='ascii')
+
+ def test_logs_tail_option(self):
+ snippet = '''Line1
+Line2'''
+ container = self.client.create_container(
+ BUSYBOX, 'echo "{0}"'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ logs = self.client.logs(id, tail=1)
+ assert logs == 'Line2\n'.encode(encoding='ascii')
+
+ def test_logs_streaming_and_follow(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo {0}'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ logs = six.binary_type()
+ for chunk in self.client.logs(id, stream=True, follow=True):
+ logs += chunk
+
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+
+ assert logs == (snippet + '\n').encode(encoding='ascii')
+
+ @pytest.mark.timeout(5)
+ def test_logs_streaming_and_follow_and_cancel(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'sh -c "echo \\"{0}\\" && sleep 3"'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ logs = six.binary_type()
+
+ generator = self.client.logs(id, stream=True, follow=True)
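+ # Closing the generator from another thread should cancel the
+ # streaming request and end the iteration below.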
+ threading.Timer(1, generator.close).start()
+
+ for chunk in generator:
+ logs += chunk
+
+ assert logs == (snippet + '\n').encode(encoding='ascii')
+
+ def test_logs_with_dict_instead_of_id(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo {0}'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ logs = self.client.logs(container)
+ assert logs == (snippet + '\n').encode(encoding='ascii')
+
+ def test_logs_with_tail_0(self):
+ snippet = 'Flowering Nights (Sakuya Iyazoi)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo "{0}"'.format(snippet)
+ )
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ logs = self.client.logs(id, tail=0)
+ assert logs == ''.encode(encoding='ascii')
+
+ @requires_api_version('1.35')
+ def test_logs_with_until(self):
+ snippet = 'Shanghai Teahouse (Hong Meiling)'
+ container = self.client.create_container(
+ BUSYBOX, 'echo "{0}"'.format(snippet)
+ )
+
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+ assert exitcode == 0
+ logs_until_1 = self.client.logs(container, until=1)
+ assert logs_until_1 == b''
+ logs_until_now = self.client.logs(container, until=datetime.now())
+ assert logs_until_now == (snippet + '\n').encode(encoding='ascii')
+
+
+class DiffTest(BaseAPIIntegrationTest):
+ def test_diff(self):
+ container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ diff = self.client.diff(id)
+ test_diff = [x for x in diff if x.get('Path', None) == '/test']
+ assert len(test_diff) == 1
+ assert 'Kind' in test_diff[0]
+ assert test_diff[0]['Kind'] == 1
+
+ def test_diff_with_dict_instead_of_id(self):
+ container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode == 0
+ diff = self.client.diff(container)
+ test_diff = [x for x in diff if x.get('Path', None) == '/test']
+ assert len(test_diff) == 1
+ assert 'Kind' in test_diff[0]
+ assert test_diff[0]['Kind'] == 1
+
+
+class StopTest(BaseAPIIntegrationTest):
+ def test_stop(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ self.client.stop(id, timeout=2)
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'Running' in state
+ assert state['Running'] is False
+
+ def test_stop_with_dict_instead_of_id(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ assert 'Id' in container
+ id = container['Id']
+ self.client.start(container)
+ self.tmp_containers.append(id)
+ self.client.stop(container, timeout=2)
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'Running' in state
+ assert state['Running'] is False
+
+
+class KillTest(BaseAPIIntegrationTest):
+ def test_kill(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ self.client.kill(id)
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False
+
+ def test_kill_with_dict_instead_of_id(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ self.client.kill(container)
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False
+
+ def test_kill_with_signal(self):
+ id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ self.client.kill(
+ id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9
+ )
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
+
+ def test_kill_with_signal_name(self):
+ id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ self.client.kill(id, signal='SIGKILL')
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
+
+ def test_kill_with_signal_integer(self):
+ id = self.client.create_container(BUSYBOX, ['sleep', '60'])
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ self.client.kill(id, signal=9)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
+
+
+class PortTest(BaseAPIIntegrationTest):
+ def test_port(self):
+
+ port_bindings = {
+ '1111': ('127.0.0.1', '4567'),
+ '2222': ('127.0.0.1', '4568')
+ }
+
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
+ host_config=self.client.create_host_config(
+ port_bindings=port_bindings, network_mode='bridge'
+ )
+ )
+ id = container['Id']
+
+ self.client.start(container)
+
+ # Call the port function on each binding and compare expected vs. actual
+ for port in port_bindings:
+ actual_bindings = self.client.port(container, port)
+ port_binding = actual_bindings.pop()
+
+ ip, host_port = port_binding['HostIp'], port_binding['HostPort']
+
+ assert ip == port_bindings[port][0]
+ assert host_port == port_bindings[port][1]
+
+ self.client.kill(id)
+
+
+class ContainerTopTest(BaseAPIIntegrationTest):
+ def test_top(self):
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60']
+ )
+
+ self.tmp_containers.append(container)
+
+ self.client.start(container)
+ res = self.client.top(container)
+ if not IS_WINDOWS_PLATFORM:
+ assert res['Titles'] == [
+ 'UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD'
+ ]
+ assert len(res['Processes']) == 1
+ assert res['Processes'][0][-1] == 'sleep 60'
+ self.client.kill(container)
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='No psargs support on windows'
+ )
+ def test_top_with_psargs(self):
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'])
+
+ self.tmp_containers.append(container)
+
+ self.client.start(container)
+ res = self.client.top(container, 'waux')
+ assert res['Titles'] == [
+ 'USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
+ 'TTY', 'STAT', 'START', 'TIME', 'COMMAND'
+ ]
+ assert len(res['Processes']) == 1
+ assert res['Processes'][0][10] == 'sleep 60'
+
+
+class RestartContainerTest(BaseAPIIntegrationTest):
+ def test_restart(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ info = self.client.inspect_container(id)
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
+ start_time1 = info['State']['StartedAt']
+ self.client.restart(id, timeout=2)
+ info2 = self.client.inspect_container(id)
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
+ start_time2 = info2['State']['StartedAt']
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
+ self.client.kill(id)
+
+ def test_restart_with_low_timeout(self):
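+ # The client-level timeout (1s) is shorter than the stop timeout
+ # (3s); the client should extend the request timeout so the
+ # restart call still succeeds.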
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.client.start(container)
+ self.client.timeout = 1
+ self.client.restart(container, timeout=3)
+ self.client.timeout = None
+ self.client.restart(container, timeout=3)
+ self.client.kill(container)
+
+ def test_restart_with_dict_instead_of_id(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ assert 'Id' in container
+ id = container['Id']
+ self.client.start(container)
+ self.tmp_containers.append(id)
+ info = self.client.inspect_container(id)
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
+ start_time1 = info['State']['StartedAt']
+ self.client.restart(container, timeout=2)
+ info2 = self.client.inspect_container(id)
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
+ start_time2 = info2['State']['StartedAt']
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
+ self.client.kill(id)
+
+
+class RemoveContainerTest(BaseAPIIntegrationTest):
+ def test_remove(self):
+ container = self.client.create_container(BUSYBOX, ['true'])
+ id = container['Id']
+ self.client.start(id)
+ self.client.wait(id)
+ self.client.remove_container(id)
+ containers = self.client.containers(all=True)
+ res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
+ assert len(res) == 0
+
+ def test_remove_with_dict_instead_of_id(self):
+ container = self.client.create_container(BUSYBOX, ['true'])
+ id = container['Id']
+ self.client.start(id)
+ self.client.wait(id)
+ self.client.remove_container(container)
+ containers = self.client.containers(all=True)
+ res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
+ assert len(res) == 0
+
+
+class AttachContainerTest(BaseAPIIntegrationTest):
+ def test_run_container_streaming(self):
+ container = self.client.create_container(BUSYBOX, '/bin/sh',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+ sock = self.client.attach_socket(container, ws=False)
+ assert sock.fileno() > -1
+
+ def test_run_container_reading_socket(self):
+ line = 'hi there and stuff and things, words!'
+ # `echo` appends CRLF, `printf` doesn't
+ command = "printf '{0}'".format(line)
+ container = self.client.create_container(BUSYBOX, command,
+ detach=True, tty=False)
+ self.tmp_containers.append(container)
+
+ opts = {"stdout": 1, "stream": 1, "logs": 1}
+ pty_stdout = self.client.attach_socket(container, opts)
+ self.addCleanup(pty_stdout.close)
+
+ self.client.start(container)
+
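+ # Read one multiplexed stream frame: the header carries the
+ # payload size, then read exactly that many bytes.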
+ next_size = next_frame_size(pty_stdout)
+ assert next_size == len(line)
+ data = read_exactly(pty_stdout, next_size)
+ assert data.decode('utf-8') == line
+
+ def test_attach_no_stream(self):
+ container = self.client.create_container(
+ BUSYBOX, 'echo hello'
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ output = self.client.attach(container, stream=False, logs=True)
+ assert output == 'hello\n'.encode(encoding='ascii')
+
+ @pytest.mark.timeout(5)
+ def test_attach_stream_and_cancel(self):
+ container = self.client.create_container(
+ BUSYBOX, 'sh -c "echo hello && sleep 60"',
+ tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ output = self.client.attach(container, stream=True, logs=True)
+
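+ # Close the attach stream after one second to simulate a
+ # client-side cancel.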
+ threading.Timer(1, output.close).start()
+
+ lines = []
+ for line in output:
+ lines.append(line)
+
+ assert len(lines) == 1
+ assert lines[0] == 'hello\r\n'.encode(encoding='ascii')
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True}
+ )
+
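+ # ctrl-p ctrl-q is the daemon's default detach sequence.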
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True}
+ )
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+ def test_detach_with_arg(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+
+ container = self.client.create_container(
+ BUSYBOX, 'cat',
+ detach=True, stdin_open=True, tty=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ sock = self.client.attach_socket(
+ container,
+ {'stdin': True, 'stream': True, 'detachKeys': 'ctrl-x'}
+ )
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
+
+
+class PauseTest(BaseAPIIntegrationTest):
+ def test_pause_unpause(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(container)
+ self.client.pause(id)
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] == 0
+ assert 'Running' in state
+ assert state['Running'] is True
+ assert 'Paused' in state
+ assert state['Paused'] is True
+
+ self.client.unpause(id)
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] == 0
+ assert 'Running' in state
+ assert state['Running'] is True
+ assert 'Paused' in state
+ assert state['Paused'] is False
+
+
+class PruneTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.25')
+ def test_prune_containers(self):
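+ # Only stopped containers are pruned; the still-running
+ # container2 must be left alone.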
+ container1 = self.client.create_container(
+ BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
+ )
+ container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.client.start(container1)
+ self.client.start(container2)
+ self.client.wait(container1)
+ result = self.client.prune_containers()
+ assert container1['Id'] in result['ContainersDeleted']
+ assert result['SpaceReclaimed'] > 0
+ assert container2['Id'] not in result['ContainersDeleted']
+
+
+class GetContainerStatsTest(BaseAPIIntegrationTest):
+ def test_get_container_stats_no_stream(self):
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'],
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ response = self.client.stats(container, stream=0)
+ self.client.kill(container)
+
+ assert isinstance(response, dict)
+ for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
+ 'memory_stats', 'blkio_stats']:
+ assert key in response
+
+ def test_get_container_stats_stream(self):
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'],
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ stream = self.client.stats(container)
+ for chunk in stream:
+ assert isinstance(chunk, dict)
+ for key in ['read', 'network', 'precpu_stats', 'cpu_stats',
+ 'memory_stats', 'blkio_stats']:
+ assert key in chunk
+
+
+class ContainerUpdateTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.22')
+ def test_update_container(self):
+ old_mem_limit = 400 * 1024 * 1024
+ new_mem_limit = 300 * 1024 * 1024
+ container = self.client.create_container(
+ BUSYBOX, 'top', host_config=self.client.create_host_config(
+ mem_limit=old_mem_limit
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.update_container(container, mem_limit=new_mem_limit)
+ inspect_data = self.client.inspect_container(container)
+ assert inspect_data['HostConfig']['Memory'] == new_mem_limit
+
+ @requires_api_version('1.23')
+ def test_restart_policy_update(self):
+ old_restart_policy = {
+ 'MaximumRetryCount': 0,
+ 'Name': 'always'
+ }
+ new_restart_policy = {
+ 'MaximumRetryCount': 42,
+ 'Name': 'on-failure'
+ }
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'],
+ host_config=self.client.create_host_config(
+ restart_policy=old_restart_policy
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.update_container(container,
+ restart_policy=new_restart_policy)
+ inspect_data = self.client.inspect_container(container)
+ assert (
+ inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'] ==
+ new_restart_policy['MaximumRetryCount']
+ )
+ assert (
+ inspect_data['HostConfig']['RestartPolicy']['Name'] ==
+ new_restart_policy['Name']
+ )
+
+
+class ContainerCPUTest(BaseAPIIntegrationTest):
+ def test_container_cpu_shares(self):
+ cpu_shares = 512
+ container = self.client.create_container(
+ BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ cpu_shares=cpu_shares
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ inspect_data = self.client.inspect_container(container)
+ assert inspect_data['HostConfig']['CpuShares'] == 512
+
+ def test_container_cpuset(self):
+ cpuset_cpus = "0,1"
+ container = self.client.create_container(
+ BUSYBOX, 'ls', host_config=self.client.create_host_config(
+ cpuset_cpus=cpuset_cpus
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ inspect_data = self.client.inspect_container(container)
+ assert inspect_data['HostConfig']['CpusetCpus'] == cpuset_cpus
+
+ @requires_api_version('1.25')
+ def test_create_with_runtime(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], runtime='runc'
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Runtime'] == 'runc'
+
+
+class LinkTest(BaseAPIIntegrationTest):
+ def test_remove_link(self):
+ # Create containers
+ container1 = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ container1_id = container1['Id']
+ self.tmp_containers.append(container1_id)
+ self.client.start(container1_id)
+
+ # Create Link
+ # strip the leading '/' from the container name
+ link_path = self.client.inspect_container(container1_id)['Name'][1:]
+ link_alias = 'mylink'
+
+ container2 = self.client.create_container(
+ BUSYBOX, 'cat', host_config=self.client.create_host_config(
+ links={link_path: link_alias}
+ )
+ )
+ container2_id = container2['Id']
+ self.tmp_containers.append(container2_id)
+ self.client.start(container2_id)
+
+ # Remove link
+ linked_name = self.client.inspect_container(container2_id)['Name'][1:]
+ link_name = '%s/%s' % (linked_name, link_alias)
+ self.client.remove_container(link_name, link=True)
+
+ # Link is gone
+ containers = self.client.containers(all=True)
+ retrieved = [x for x in containers if link_name in x['Names']]
+ assert len(retrieved) == 0
+
+ # Containers are still there
+ retrieved = [
+ x for x in containers if x['Id'].startswith(container1_id) or
+ x['Id'].startswith(container2_id)
+ ]
+ assert len(retrieved) == 2
diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py
new file mode 100644
index 0000000..1a5a4e5
--- /dev/null
+++ b/tests/integration/api_exec_test.py
@@ -0,0 +1,205 @@
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
+
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import (
+ requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
+)
+
+
+class ExecTest(BaseAPIIntegrationTest):
+ def test_execute_command(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, ['echo', 'hello'])
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'hello\n'
+
+ def test_exec_command_string(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'echo hello world')
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'hello world\n'
+
+ def test_exec_command_as_user(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'whoami', user='default')
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'default\n'
+
+ def test_exec_command_as_root(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'whoami')
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'root\n'
+
+ def test_exec_command_streaming(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.tmp_containers.append(id)
+ self.client.start(id)
+
+ exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
+ assert 'Id' in exec_id
+
+ res = b''
+ for chunk in self.client.exec_start(exec_id, stream=True):
+ res += chunk
+ assert res == b'hello\nworld\n'
+
+ def test_exec_start_socket(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ container_id = container['Id']
+ self.client.start(container_id)
+ self.tmp_containers.append(container_id)
+
+ line = 'yay, interactive exec!'
+ # `echo` appends CRLF, `printf` doesn't
+ exec_id = self.client.exec_create(
+ container_id, ['printf', line], tty=True)
+ assert 'Id' in exec_id
+
+ socket = self.client.exec_start(exec_id, socket=True)
+ self.addCleanup(socket.close)
+
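+ # Demultiplex a single frame from the raw exec socket.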
+ next_size = next_frame_size(socket)
+ assert next_size == len(line)
+ data = read_exactly(socket, next_size)
+ assert data.decode('utf-8') == line
+
+ def test_exec_start_detached(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ container_id = container['Id']
+ self.client.start(container_id)
+ self.tmp_containers.append(container_id)
+
+ exec_id = self.client.exec_create(
+ container_id, ['printf', "asdqwe"])
+ assert 'Id' in exec_id
+
+ response = self.client.exec_start(exec_id, detach=True)
+
+ assert response == ""
+
+ def test_exec_inspect(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
+ assert 'Id' in exec_id
+ self.client.exec_start(exec_id)
+ exec_info = self.client.exec_inspect(exec_id)
+ assert 'ExitCode' in exec_info
+ assert exec_info['ExitCode'] != 0
+
+ @requires_api_version('1.25')
+ def test_exec_command_with_env(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'env', environment=["X=Y"])
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert b'X=Y\n' in exec_log
+
+ @requires_api_version('1.35')
+ def test_exec_command_with_workdir(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ res = self.client.exec_create(container, 'pwd', workdir='/var/www')
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'/var/www\n'
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
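+        # The helper writes each detach key sequence into the exec socket and
+        # asserts that the `cat` session detaches.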
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
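+        # Simulate a detachKeys entry as it would be parsed from the user's
+        # config.json.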
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+ def test_detach_with_arg(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat',
+ stdin=True, tty=True, detach_keys='ctrl-x', stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
new file mode 100644
index 0000000..5dbac37
--- /dev/null
+++ b/tests/integration/api_healthcheck_test.py
@@ -0,0 +1,68 @@
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from .. import helpers
+
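+# The Docker API expresses healthcheck durations in nanoseconds.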
+SECOND = 1000000000
+
+
+def wait_on_health_status(client, container, status):
+ def condition():
+ res = client.inspect_container(container)
+ return res['State']['Health']['Status'] == status
+ return helpers.wait_on_condition(condition)
+
+
+class HealthcheckTest(BaseAPIIntegrationTest):
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_shell_command(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
+ self.tmp_containers.append(container)
+
+ res = self.client.inspect_container(container)
+ assert res['Config']['Healthcheck']['Test'] == [
+ 'CMD-SHELL', 'echo "hello world"'
+ ]
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_passes(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="true",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_fails(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="false",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "unhealthy")
+
+ @helpers.requires_api_version('1.29')
+ def test_healthcheck_start_period(self):
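+        # The check fails until /counter.txt has accumulated 3 lines, so the
+        # container only turns healthy if failures within start_period are
+        # not counted against the retry limit.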
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="echo 'x' >> /counter.txt && "
+ "test `cat /counter.txt | wc -l` -ge 3",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ start_period=3 * SECOND
+ )
+ )
+
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py
new file mode 100644
index 0000000..050e7f3
--- /dev/null
+++ b/tests/integration/api_image_test.py
@@ -0,0 +1,368 @@
+import contextlib
+import json
+import shutil
+import socket
+import tarfile
+import tempfile
+import threading
+
+import pytest
+import six
+from six.moves import BaseHTTPServer
+from six.moves import socketserver
+
+
+import docker
+
+from ..helpers import requires_api_version, requires_experimental
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+
+class ListImagesTest(BaseAPIIntegrationTest):
+ def test_images(self):
+ res1 = self.client.images(all=True)
+ assert 'Id' in res1[0]
+ res10 = res1[0]
+ assert 'Created' in res10
+ assert 'RepoTags' in res10
+ distinct = []
+ for img in res1:
+ if img['Id'] not in distinct:
+ distinct.append(img['Id'])
+ assert len(distinct) == self.client.info()['Images']
+
+ def test_images_quiet(self):
+ res1 = self.client.images(quiet=True)
+        assert isinstance(res1[0], six.text_type)
+
+
+class PullImageTest(BaseAPIIntegrationTest):
+ def test_pull(self):
+ try:
+ self.client.remove_image('hello-world')
+ except docker.errors.APIError:
+ pass
+ res = self.client.pull('hello-world', tag='latest')
+ self.tmp_imgs.append('hello-world')
+        assert isinstance(res, six.text_type)
+ assert len(self.client.images('hello-world')) >= 1
+ img_info = self.client.inspect_image('hello-world')
+ assert 'Id' in img_info
+
+ def test_pull_streaming(self):
+ try:
+ self.client.remove_image('hello-world')
+ except docker.errors.APIError:
+ pass
+ stream = self.client.pull(
+ 'hello-world', tag='latest', stream=True, decode=True)
+ self.tmp_imgs.append('hello-world')
+ for chunk in stream:
+ assert isinstance(chunk, dict)
+ assert len(self.client.images('hello-world')) >= 1
+ img_info = self.client.inspect_image('hello-world')
+ assert 'Id' in img_info
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_pull_invalid_platform(self):
+ with pytest.raises(docker.errors.APIError) as excinfo:
+ self.client.pull('hello-world', platform='foobar')
+
+ assert excinfo.value.status_code == 500
+ assert 'invalid platform' in excinfo.exconly()
+
+
+class CommitTest(BaseAPIIntegrationTest):
+ def test_commit(self):
+ container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ res = self.client.commit(id)
+ assert 'Id' in res
+ img_id = res['Id']
+ self.tmp_imgs.append(img_id)
+ img = self.client.inspect_image(img_id)
+ assert 'Container' in img
+ assert img['Container'].startswith(id)
+ assert 'ContainerConfig' in img
+ assert 'Image' in img['ContainerConfig']
+ assert BUSYBOX == img['ContainerConfig']['Image']
+ busybox_id = self.client.inspect_image(BUSYBOX)['Id']
+ assert 'Parent' in img
+ assert img['Parent'] == busybox_id
+
+ def test_commit_with_changes(self):
+ cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ self.tmp_containers.append(cid)
+ self.client.start(cid)
+ img_id = self.client.commit(
+ cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
+ )
+ self.tmp_imgs.append(img_id)
+ img = self.client.inspect_image(img_id)
+ assert 'Container' in img
+ assert img['Container'].startswith(cid['Id'])
+ assert '8000/tcp' in img['Config']['ExposedPorts']
+ assert img['Config']['Cmd'] == ['bash']
+
+
+class RemoveImageTest(BaseAPIIntegrationTest):
+ def test_remove(self):
+ container = self.client.create_container(BUSYBOX, ['touch', '/test'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ res = self.client.commit(id)
+ assert 'Id' in res
+ img_id = res['Id']
+ self.tmp_imgs.append(img_id)
+ logs = self.client.remove_image(img_id, force=True)
+ assert {"Deleted": img_id} in logs
+ images = self.client.images(all=True)
+ res = [x for x in images if x['Id'].startswith(img_id)]
+ assert len(res) == 0
+
+
+class ImportImageTest(BaseAPIIntegrationTest):
+ '''Base class for `docker import` test cases.'''
+
+ TAR_SIZE = 512 * 1024
+
+ def write_dummy_tar_content(self, n_bytes, tar_fd):
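+        # Grow a temp file to n_bytes by seeking to the last offset and
+        # writing a single byte, then pack it into the tar as 'testdata'.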
+ def extend_file(f, n_bytes):
+ f.seek(n_bytes - 1)
+ f.write(bytearray([65]))
+ f.seek(0)
+
+ tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
+
+ with tempfile.NamedTemporaryFile() as f:
+ extend_file(f, n_bytes)
+ tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
+ tar.addfile(tarinfo, fileobj=f)
+
+ tar.close()
+
+ @contextlib.contextmanager
+ def dummy_tar_stream(self, n_bytes):
+ '''Yields a stream that is valid tar data of size n_bytes.'''
+ with tempfile.NamedTemporaryFile() as tar_file:
+ self.write_dummy_tar_content(n_bytes, tar_file)
+ tar_file.seek(0)
+ yield tar_file
+
+ @contextlib.contextmanager
+ def dummy_tar_file(self, n_bytes):
+ '''Yields the name of a valid tar file of size n_bytes.'''
+ with tempfile.NamedTemporaryFile(delete=False) as tar_file:
+ self.write_dummy_tar_content(n_bytes, tar_file)
+ tar_file.seek(0)
+ yield tar_file.name
+
+ def test_import_from_bytes(self):
+ with self.dummy_tar_stream(n_bytes=500) as f:
+ content = f.read()
+
+        # The generic import_image() function cannot import in-memory bytes
+        # that arrive as a string type: it would treat the value as a
+        # filename and raise. So we test the dedicated
+        # import_image_from_data() function instead.
+ statuses = self.client.import_image_from_data(
+ content, repository='test/import-from-bytes')
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ def test_import_from_file(self):
+ with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
+ # statuses = self.client.import_image(
+ # src=tar_filename, repository='test/import-from-file')
+ statuses = self.client.import_image_from_file(
+ tar_filename, repository='test/import-from-file')
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ assert 'status' in result
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ def test_import_from_stream(self):
+ with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
+ statuses = self.client.import_image(
+ src=tar_stream, repository='test/import-from-stream')
+ # statuses = self.client.import_image_from_stream(
+ # tar_stream, repository='test/import-from-stream')
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ assert 'status' in result
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ def test_import_image_from_data_with_changes(self):
+ with self.dummy_tar_stream(n_bytes=500) as f:
+ content = f.read()
+
+ statuses = self.client.import_image_from_data(
+ content, repository='test/import-from-bytes',
+ changes=['USER foobar', 'CMD ["echo"]']
+ )
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ img_data = self.client.inspect_image(img_id)
+ assert img_data is not None
+ assert img_data['Config']['Cmd'] == ['echo']
+ assert img_data['Config']['User'] == 'foobar'
+
+ def test_import_image_with_changes(self):
+ with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
+ statuses = self.client.import_image(
+ src=tar_filename, repository='test/import-from-file',
+ changes=['USER foobar', 'CMD ["echo"]']
+ )
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+ img_data = self.client.inspect_image(img_id)
+ assert img_data is not None
+ assert img_data['Config']['Cmd'] == ['echo']
+ assert img_data['Config']['User'] == 'foobar'
+
+    # Docs say output is available since API 1.23, but this test fails
+    # on engine 1.12.0.
+ @requires_api_version('1.24')
+ def test_get_load_image(self):
+ test_img = 'hello-world:latest'
+ self.client.pull(test_img)
+ data = self.client.get_image(test_img)
+ assert data
+ output = self.client.load_image(data)
+ assert any([
+ line for line in output
+ if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
+ ])
+
+ @contextlib.contextmanager
+ def temporary_http_file_server(self, stream):
+ '''Serve data from an IO stream over HTTP.'''
+
+ class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
+ def do_GET(self):
+ self.send_response(200)
+ self.send_header('Content-Type', 'application/x-tar')
+ self.end_headers()
+ shutil.copyfileobj(stream, self.wfile)
+
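+        # Bind to port 0 so the OS assigns a free ephemeral port.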
+ server = socketserver.TCPServer(('', 0), Handler)
+ thread = threading.Thread(target=server.serve_forever)
+ thread.setDaemon(True)
+ thread.start()
+
+ yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
+
+ server.shutdown()
+
+    @pytest.mark.skip(reason="Doesn't work inside a container - FIXME")
+ def test_import_from_url(self):
+ # The crappy test HTTP server doesn't handle large files well, so use
+ # a small file.
+ tar_size = 10240
+
+ with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
+ with self.temporary_http_file_server(tar_data) as url:
+ statuses = self.client.import_image(
+ src=url, repository='test/import-from-url')
+
+ result_text = statuses.splitlines()[-1]
+ result = json.loads(result_text)
+
+ assert 'error' not in result
+
+ assert 'status' in result
+ img_id = result['status']
+ self.tmp_imgs.append(img_id)
+
+
+@requires_api_version('1.25')
+class PruneImagesTest(BaseAPIIntegrationTest):
+ def test_prune_images(self):
+ try:
+ self.client.remove_image('hello-world')
+ except docker.errors.APIError:
+ pass
+
+ # Ensure busybox does not get pruned
+ ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.tmp_containers.append(ctnr)
+
+ self.client.pull('hello-world', tag='latest')
+ self.tmp_imgs.append('hello-world')
+ img_id = self.client.inspect_image('hello-world')['Id']
+ result = self.client.prune_images()
+ assert img_id not in [
+ img.get('Deleted') for img in result.get('ImagesDeleted') or []
+ ]
+ result = self.client.prune_images({'dangling': False})
+ assert result['SpaceReclaimed'] > 0
+ assert 'hello-world:latest' in [
+ img.get('Untagged') for img in result['ImagesDeleted']
+ ]
+ assert img_id in [
+ img.get('Deleted') for img in result['ImagesDeleted']
+ ]
+
+
+class SaveLoadImagesTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.23')
+ def test_get_image_load_image(self):
+ with tempfile.TemporaryFile() as f:
+ stream = self.client.get_image(BUSYBOX)
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ result = self.client.load_image(f.read())
+
+ success = False
+ result_line = 'Loaded image: {}\n'.format(BUSYBOX)
+        for data in result:
+            if data.get('stream') == result_line:
+                success = True
+                break
+ assert success is True
+
+
+@requires_api_version('1.30')
+class InspectDistributionTest(BaseAPIIntegrationTest):
+ def test_inspect_distribution(self):
+ data = self.client.inspect_distribution('busybox:latest')
+ assert data is not None
+ assert 'Platforms' in data
+ assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py
new file mode 100644
index 0000000..b6726d0
--- /dev/null
+++ b/tests/integration/api_network_test.py
@@ -0,0 +1,474 @@
+import docker
+from docker.types import IPAMConfig, IPAMPool
+import pytest
+
+from ..helpers import random_name, requires_api_version
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+
+class TestNetworks(BaseAPIIntegrationTest):
+ def tearDown(self):
+ super(TestNetworks, self).tearDown()
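+        # Some tests init a swarm (overlay/ingress networks); always leave it
+        # so later tests start from a clean daemon.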
+ self.client.leave_swarm(force=True)
+
+ def create_network(self, *args, **kwargs):
+ net_name = random_name()
+ net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
+ self.tmp_networks.append(net_id)
+ return (net_name, net_id)
+
+ def test_list_networks(self):
+ networks = self.client.networks()
+
+ net_name, net_id = self.create_network()
+
+ networks = self.client.networks()
+ assert net_id in [n['Id'] for n in networks]
+
+ networks_by_name = self.client.networks(names=[net_name])
+ assert [n['Id'] for n in networks_by_name] == [net_id]
+
+ networks_by_partial_id = self.client.networks(ids=[net_id[:8]])
+ assert [n['Id'] for n in networks_by_partial_id] == [net_id]
+
+ def test_inspect_network(self):
+ net_name, net_id = self.create_network()
+
+ net = self.client.inspect_network(net_id)
+ assert net['Id'] == net_id
+ assert net['Name'] == net_name
+ assert net['Driver'] == 'bridge'
+ assert net['Scope'] == 'local'
+ assert net['IPAM']['Driver'] == 'default'
+
+ def test_create_network_with_ipam_config(self):
+ _, net_id = self.create_network(
+ ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[
+ IPAMPool(
+ subnet="172.28.0.0/16",
+ iprange="172.28.5.0/24",
+ gateway="172.28.5.254",
+ aux_addresses={
+ "a": "172.28.1.5",
+ "b": "172.28.1.6",
+ "c": "172.28.1.7",
+ },
+ ),
+ ],
+ ),
+ )
+
+ net = self.client.inspect_network(net_id)
+ ipam = net['IPAM']
+
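+        # No IPAM options were requested, so none should be reported.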
+ assert ipam.pop('Options', None) is None
+
+ assert ipam['Driver'] == 'default'
+
+ assert ipam['Config'] == [{
+ 'Subnet': "172.28.0.0/16",
+ 'IPRange': "172.28.5.0/24",
+ 'Gateway': "172.28.5.254",
+ 'AuxiliaryAddresses': {
+ "a": "172.28.1.5",
+ "b": "172.28.1.6",
+ "c": "172.28.1.7",
+ },
+ }]
+
+ def test_create_network_with_host_driver_fails(self):
+ with pytest.raises(docker.errors.APIError):
+ self.client.create_network(random_name(), driver='host')
+
+ def test_remove_network(self):
+ net_name, net_id = self.create_network()
+ assert net_name in [n['Name'] for n in self.client.networks()]
+
+ self.client.remove_network(net_id)
+ assert net_name not in [n['Name'] for n in self.client.networks()]
+
+ def test_connect_and_disconnect_container(self):
+ net_name, net_id = self.create_network()
+
+ container = self.client.create_container(BUSYBOX, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ network_data = self.client.inspect_network(net_id)
+ assert not network_data.get('Containers')
+
+ self.client.connect_container_to_network(container, net_id)
+ network_data = self.client.inspect_network(net_id)
+ assert list(network_data['Containers'].keys()) == [
+ container['Id']
+ ]
+
+ with pytest.raises(docker.errors.APIError):
+ self.client.connect_container_to_network(container, net_id)
+
+ self.client.disconnect_container_from_network(container, net_id)
+ network_data = self.client.inspect_network(net_id)
+ assert not network_data.get('Containers')
+
+ with pytest.raises(docker.errors.APIError):
+ self.client.disconnect_container_from_network(container, net_id)
+
+ @requires_api_version('1.22')
+ def test_connect_and_force_disconnect_container(self):
+ net_name, net_id = self.create_network()
+
+ container = self.client.create_container(BUSYBOX, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ network_data = self.client.inspect_network(net_id)
+ assert not network_data.get('Containers')
+
+ self.client.connect_container_to_network(container, net_id)
+ network_data = self.client.inspect_network(net_id)
+ assert list(network_data['Containers'].keys()) == \
+ [container['Id']]
+
+ self.client.disconnect_container_from_network(container, net_id, True)
+ network_data = self.client.inspect_network(net_id)
+ assert not network_data.get('Containers')
+
+ with pytest.raises(docker.errors.APIError):
+ self.client.disconnect_container_from_network(
+ container, net_id, force=True
+ )
+
+ @requires_api_version('1.22')
+ def test_connect_with_aliases(self):
+ net_name, net_id = self.create_network()
+
+ container = self.client.create_container(BUSYBOX, 'top')
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ self.client.connect_container_to_network(
+ container, net_id, aliases=['foo', 'bar'])
+ container_data = self.client.inspect_container(container)
+ aliases = (
+ container_data['NetworkSettings']['Networks'][net_name]['Aliases']
+ )
+ assert 'foo' in aliases
+ assert 'bar' in aliases
+
+ def test_connect_on_container_create(self):
+ net_name, net_id = self.create_network()
+
+ container = self.client.create_container(
+ image=BUSYBOX,
+ command='top',
+ host_config=self.client.create_host_config(network_mode=net_name),
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ network_data = self.client.inspect_network(net_id)
+ assert list(network_data['Containers'].keys()) == \
+ [container['Id']]
+
+ self.client.disconnect_container_from_network(container, net_id)
+ network_data = self.client.inspect_network(net_id)
+ assert not network_data.get('Containers')
+
+ @requires_api_version('1.22')
+ def test_create_with_aliases(self):
+ net_name, net_id = self.create_network()
+
+ container = self.client.create_container(
+ image=BUSYBOX,
+ command='top',
+ host_config=self.client.create_host_config(
+ network_mode=net_name,
+ ),
+ networking_config=self.client.create_networking_config({
+ net_name: self.client.create_endpoint_config(
+ aliases=['foo', 'bar'],
+ ),
+ }),
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ container_data = self.client.inspect_container(container)
+ aliases = (
+ container_data['NetworkSettings']['Networks'][net_name]['Aliases']
+ )
+ assert 'foo' in aliases
+ assert 'bar' in aliases
+
+ @requires_api_version('1.22')
+ def test_create_with_ipv4_address(self):
+ net_name, net_id = self.create_network(
+ ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[IPAMPool(subnet="132.124.0.0/16")],
+ ),
+ )
+ container = self.client.create_container(
+ image=BUSYBOX, command='top',
+ host_config=self.client.create_host_config(network_mode=net_name),
+ networking_config=self.client.create_networking_config({
+ net_name: self.client.create_endpoint_config(
+ ipv4_address='132.124.0.23'
+ )
+ })
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['IPAMConfig']['IPv4Address']\
+ == '132.124.0.23'
+
+ @requires_api_version('1.22')
+ def test_create_with_ipv6_address(self):
+ net_name, net_id = self.create_network(
+ ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[IPAMPool(subnet="2001:389::1/64")],
+ ),
+ )
+ container = self.client.create_container(
+ image=BUSYBOX, command='top',
+ host_config=self.client.create_host_config(network_mode=net_name),
+ networking_config=self.client.create_networking_config({
+ net_name: self.client.create_endpoint_config(
+ ipv6_address='2001:389::f00d'
+ )
+ })
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['IPAMConfig']['IPv6Address']\
+ == '2001:389::f00d'
+
+ @requires_api_version('1.24')
+ def test_create_with_linklocal_ips(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top',
+ networking_config=self.client.create_networking_config(
+ {
+ 'bridge': self.client.create_endpoint_config(
+ link_local_ips=['169.254.8.8']
+ )
+ }
+ ),
+ host_config=self.client.create_host_config(network_mode='bridge')
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ container_data = self.client.inspect_container(container)
+ net_cfg = container_data['NetworkSettings']['Networks']['bridge']
+ assert 'IPAMConfig' in net_cfg
+ assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
+ assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
+
+ @requires_api_version('1.22')
+ def test_create_with_links(self):
+ net_name, net_id = self.create_network()
+
+ container = self.create_and_start(
+ host_config=self.client.create_host_config(network_mode=net_name),
+ networking_config=self.client.create_networking_config({
+ net_name: self.client.create_endpoint_config(
+ links=[('docker-py-test-upstream', 'bar')],
+ ),
+ }),
+ )
+
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['Links'] == [
+ 'docker-py-test-upstream:bar'
+ ]
+
+ self.create_and_start(
+ name='docker-py-test-upstream',
+ host_config=self.client.create_host_config(network_mode=net_name),
+ )
+
+ self.execute(container, ['nslookup', 'bar'])
+
+ def test_create_check_duplicate(self):
+ net_name, net_id = self.create_network()
+ with pytest.raises(docker.errors.APIError):
+ self.client.create_network(net_name, check_duplicate=True)
+ net_id = self.client.create_network(net_name, check_duplicate=False)
+ self.tmp_networks.append(net_id['Id'])
+
+ @requires_api_version('1.22')
+ def test_connect_with_links(self):
+ net_name, net_id = self.create_network()
+
+ container = self.create_and_start(
+ host_config=self.client.create_host_config(network_mode=net_name))
+
+ self.client.disconnect_container_from_network(container, net_name)
+ self.client.connect_container_to_network(
+ container, net_name,
+ links=[('docker-py-test-upstream', 'bar')])
+
+ net_settings = self.client.inspect_container(container)[
+ 'NetworkSettings'
+ ]
+ assert net_settings['Networks'][net_name]['Links'] == [
+ 'docker-py-test-upstream:bar'
+ ]
+
+ self.create_and_start(
+ name='docker-py-test-upstream',
+ host_config=self.client.create_host_config(network_mode=net_name),
+ )
+
+ self.execute(container, ['nslookup', 'bar'])
+
+ @requires_api_version('1.22')
+ def test_connect_with_ipv4_address(self):
+ net_name, net_id = self.create_network(
+ ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[
+ IPAMPool(
+ subnet="172.28.0.0/16", iprange="172.28.5.0/24",
+ gateway="172.28.5.254"
+ )
+ ]
+ )
+ )
+
+ container = self.create_and_start(
+ host_config=self.client.create_host_config(network_mode=net_name))
+
+ self.client.disconnect_container_from_network(container, net_name)
+ self.client.connect_container_to_network(
+ container, net_name, ipv4_address='172.28.5.24'
+ )
+
+ container_data = self.client.inspect_container(container)
+ net_data = container_data['NetworkSettings']['Networks'][net_name]
+ assert net_data['IPAMConfig']['IPv4Address'] == '172.28.5.24'
+
+ @requires_api_version('1.22')
+ def test_connect_with_ipv6_address(self):
+ net_name, net_id = self.create_network(
+ ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[
+ IPAMPool(
+ subnet="2001:389::1/64", iprange="2001:389::0/96",
+ gateway="2001:389::ffff"
+ )
+ ]
+ )
+ )
+
+ container = self.create_and_start(
+ host_config=self.client.create_host_config(network_mode=net_name))
+
+ self.client.disconnect_container_from_network(container, net_name)
+ self.client.connect_container_to_network(
+ container, net_name, ipv6_address='2001:389::f00d'
+ )
+
+ container_data = self.client.inspect_container(container)
+ net_data = container_data['NetworkSettings']['Networks'][net_name]
+ assert net_data['IPAMConfig']['IPv6Address'] == '2001:389::f00d'
+
+ @requires_api_version('1.23')
+ def test_create_internal_networks(self):
+ _, net_id = self.create_network(internal=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Internal'] is True
+
+ @requires_api_version('1.23')
+ def test_create_network_with_labels(self):
+ _, net_id = self.create_network(labels={
+ 'com.docker.py.test': 'label'
+ })
+
+ net = self.client.inspect_network(net_id)
+ assert 'Labels' in net
+ assert len(net['Labels']) == 1
+ assert net['Labels'] == {
+ 'com.docker.py.test': 'label'
+ }
+
+ @requires_api_version('1.23')
+ def test_create_network_with_labels_wrong_type(self):
+ with pytest.raises(TypeError):
+ self.create_network(labels=['com.docker.py.test=label', ])
+
+ @requires_api_version('1.23')
+ def test_create_network_ipv6_enabled(self):
+ _, net_id = self.create_network(
+ enable_ipv6=True, ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[
+ IPAMPool(
+ subnet="2001:389::1/64", iprange="2001:389::0/96",
+ gateway="2001:389::ffff"
+ )
+ ]
+ )
+ )
+ net = self.client.inspect_network(net_id)
+ assert net['EnableIPv6'] is True
+
+ @requires_api_version('1.25')
+ def test_create_network_attachable(self):
+ assert self.init_swarm()
+ _, net_id = self.create_network(driver='overlay', attachable=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Attachable'] is True
+
+ @requires_api_version('1.29')
+ def test_create_network_ingress(self):
+ assert self.init_swarm()
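+        # Only one ingress network may exist at a time; remove the default
+        # one so ours can take its place.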
+ self.client.remove_network('ingress')
+ _, net_id = self.create_network(driver='overlay', ingress=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Ingress'] is True
+
+ @requires_api_version('1.25')
+ def test_prune_networks(self):
+ net_name, _ = self.create_network()
+ result = self.client.prune_networks()
+ assert net_name in result['NetworksDeleted']
+
+ @requires_api_version('1.31')
+ def test_create_inspect_network_with_scope(self):
+ assert self.init_swarm()
+ net_name_loc, net_id_loc = self.create_network(scope='local')
+
+ assert self.client.inspect_network(net_name_loc)
+ assert self.client.inspect_network(net_name_loc, scope='local')
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_network(net_name_loc, scope='global')
+
+ net_name_swarm, net_id_swarm = self.create_network(
+ driver='overlay', scope='swarm'
+ )
+
+ assert self.client.inspect_network(net_name_swarm)
+ assert self.client.inspect_network(net_name_swarm, scope='swarm')
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_network(net_name_swarm, scope='local')
+
+ def test_create_remove_network_with_space_in_name(self):
+ net_id = self.client.create_network('test 01')
+ self.tmp_networks.append(net_id)
+ assert self.client.inspect_network('test 01')
+ assert self.client.remove_network('test 01') is None # does not raise
diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py
new file mode 100644
index 0000000..1150b09
--- /dev/null
+++ b/tests/integration/api_plugin_test.py
@@ -0,0 +1,145 @@
+import os
+
+import docker
+import pytest
+
+from .base import BaseAPIIntegrationTest, TEST_API_VERSION
+from ..helpers import requires_api_version
+
+SSHFS = 'vieux/sshfs:latest'
+
+
+@requires_api_version('1.25')
+class PluginTest(BaseAPIIntegrationTest):
+ @classmethod
+ def teardown_class(cls):
+ c = docker.APIClient(
+ version=TEST_API_VERSION, timeout=60,
+ **docker.utils.kwargs_from_env()
+ )
+ try:
+ c.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def teardown_method(self, method):
+ try:
+ self.client.disable_plugin(SSHFS)
+ except docker.errors.APIError:
+ pass
+
+ for p in self.tmp_plugins:
+ try:
+ self.client.remove_plugin(p, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def ensure_plugin_installed(self, plugin_name):
+ try:
+ return self.client.inspect_plugin(plugin_name)
+ except docker.errors.NotFound:
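+            # Grant the privileges the plugin requests and stream the pull
+            # until it finishes.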
+ prv = self.client.plugin_privileges(plugin_name)
+ for d in self.client.pull_plugin(plugin_name, prv):
+ pass
+ return self.client.inspect_plugin(plugin_name)
+
+ def test_enable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ with pytest.raises(docker.errors.APIError):
+ self.client.enable_plugin(SSHFS)
+
+ def test_disable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ self.client.disable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is False
+ with pytest.raises(docker.errors.APIError):
+ self.client.disable_plugin(SSHFS)
+
+ def test_inspect_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.inspect_plugin(SSHFS)
+ assert 'Config' in data
+ assert 'Name' in data
+ assert data['Name'] == SSHFS
+
+ def test_plugin_privileges(self):
+ prv = self.client.plugin_privileges(SSHFS)
+ assert isinstance(prv, list)
+ for item in prv:
+ assert 'Name' in item
+ assert 'Value' in item
+ assert 'Description' in item
+
+ def test_list_plugins(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.plugins()
+ assert len(data) > 0
+ plugin = [p for p in data if p['Name'] == SSHFS][0]
+ assert 'Config' in plugin
+
+ def test_configure_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ self.client.configure_plugin(SSHFS, {
+ 'DEBUG': '1'
+ })
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'Env' in pl_data['Settings']
+ assert 'DEBUG=1' in pl_data['Settings']['Env']
+
+ self.client.configure_plugin(SSHFS, ['DEBUG=0'])
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'DEBUG=0' in pl_data['Settings']['Env']
+
+ def test_remove_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.remove_plugin(SSHFS) is True
+
+ def test_force_remove_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ self.client.enable_plugin(SSHFS)
+ assert self.client.inspect_plugin(SSHFS)['Enabled'] is True
+ assert self.client.remove_plugin(SSHFS, force=True) is True
+
+ def test_install_plugin(self):
+ try:
+ self.client.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.pull_plugin(SSHFS, prv)]
+        # filter() is lazy on Python 3; materialize the check with any().
+        assert any(d.get('status') == 'Download complete' for d in logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ @requires_api_version('1.26')
+ def test_upgrade_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.upgrade_plugin(SSHFS, SSHFS, prv)]
+        # filter() is lazy on Python 3; materialize the check with any().
+        assert any(d.get('status') == 'Download complete' for d in logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ def test_create_plugin(self):
+        plugin_data_dir = os.path.join(
+            os.path.dirname(__file__), 'testdata', 'dummy-plugin'
+        )
+ assert self.client.create_plugin(
+ 'docker-sdk-py/dummy', plugin_data_dir
+ )
+ self.tmp_plugins.append('docker-sdk-py/dummy')
+ data = self.client.inspect_plugin('docker-sdk-py/dummy')
+ assert data['Config']['Entrypoint'] == ['/dummy']
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
new file mode 100644
index 0000000..b3d93b8
--- /dev/null
+++ b/tests/integration/api_secret_test.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.25')
+class SecretAPITest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def test_create_secret(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_secret_unicode_data(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == secret_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ assert self.client.remove_secret(secret_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_secret(secret_id)
+
+ def test_list_secrets(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ data = self.client.secrets(filters={'names': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == secret_id['ID']
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
new file mode 100644
index 0000000..85f9dcc
--- /dev/null
+++ b/tests/integration/api_service_test.py
@@ -0,0 +1,1255 @@
+# -*- coding: utf-8 -*-
+
+import random
+import time
+
+import docker
+import pytest
+import six
+
+from ..helpers import (
+ force_leave_swarm, requires_api_version, requires_experimental
+)
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+
+class ServiceTest(BaseAPIIntegrationTest):
+ @classmethod
+ def setup_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+ cls._init_swarm(client)
+
+ @classmethod
+ def teardown_class(cls):
+ client = cls.get_client_instance()
+ force_leave_swarm(client)
+
+ def tearDown(self):
+ for service in self.client.services(filters={'name': 'dockerpytest_'}):
+ try:
+ self.client.remove_service(service['ID'])
+ except docker.errors.APIError:
+ pass
+ super(ServiceTest, self).tearDown()
+
+ def get_service_name(self):
+ return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+ def get_service_container(self, service_name, attempts=20, interval=0.5,
+ include_stopped=False):
+        # There is some delay between a service's creation and the creation
+        # of its containers, so poll until one appears or the attempts are
+        # exhausted.
+ while True:
+ containers = self.client.containers(
+ filters={'name': [service_name]}, quiet=True,
+ all=include_stopped
+ )
+ if len(containers) > 0:
+ return containers[0]
+ attempts -= 1
+ if attempts <= 0:
+ return None
+ time.sleep(interval)
+
+ def create_simple_service(self, name=None, labels=None):
+ if name:
+ name = 'dockerpytest_{0}'.format(name)
+ else:
+ name = self.get_service_name()
+
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ return name, self.client.create_service(
+ task_tmpl, name=name, labels=labels
+ )
+
+ @requires_api_version('1.24')
+ def test_list_services(self):
+ services = self.client.services()
+ assert isinstance(services, list)
+
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+ self.create_simple_service()
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 1
+ assert 'dockerpytest_' in test_services[0]['Spec']['Name']
+
+ @requires_api_version('1.24')
+ def test_list_services_filter_by_label(self):
+ test_services = self.client.services(filters={'label': 'test_label'})
+ assert len(test_services) == 0
+ self.create_simple_service(labels={'test_label': 'testing'})
+ test_services = self.client.services(filters={'label': 'test_label'})
+ assert len(test_services) == 1
+ assert test_services[0]['Spec']['Labels']['test_label'] == 'testing'
+
+ def test_inspect_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ def test_inspect_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_name)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ @requires_api_version('1.29')
+ def test_inspect_service_insert_defaults(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ svc_info_defaults = self.client.inspect_service(
+ svc_id, insert_defaults=True
+ )
+ assert svc_info != svc_info_defaults
+ assert 'RollbackConfig' in svc_info_defaults['Spec']
+ assert 'RollbackConfig' not in svc_info['Spec']
+
+ def test_remove_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_id)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_remove_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_name)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_create_service_simple(self):
+ name, svc_id = self.create_simple_service()
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+
+ @requires_api_version('1.25')
+ @requires_experimental(until='1.29')
+ def test_service_logs(self):
+ name, svc_id = self.create_simple_service()
+ assert self.get_service_container(name, include_stopped=True)
+ attempts = 20
+ while True:
+            if attempts == 0:
+                self.fail('No service logs produced by endpoint')
+ logs = self.client.service_logs(svc_id, stdout=True, is_tty=False)
+ try:
+ log_line = next(logs)
+ except StopIteration:
+ attempts -= 1
+ time.sleep(0.1)
+ continue
+ else:
+ break
+
+ if six.PY3:
+ log_line = log_line.decode('utf-8')
+ assert 'hello\n' in log_line
+
+ def test_create_service_custom_log_driver(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ log_cfg = docker.types.DriverConfig('none')
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, log_driver=log_cfg
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'LogDriver' in res_template
+ assert 'Name' in res_template['LogDriver']
+ assert res_template['LogDriver']['Name'] == 'none'
+
+ def test_create_service_with_volume_mount(self):
+ vol_name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['ls'],
+ mounts=[
+ docker.types.Mount(target='/test', source=vol_name)
+ ]
+ )
+ self.tmp_volumes.append(vol_name)
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Mounts' in cspec
+ assert len(cspec['Mounts']) == 1
+ mount = cspec['Mounts'][0]
+ assert mount['Target'] == '/test'
+ assert mount['Source'] == vol_name
+ assert mount['Type'] == 'volume'
+
+ def test_create_service_with_resources_constraints(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ resources = docker.types.Resources(
+ cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
+ cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ assert res_template['Resources']['Limits'] == resources['Limits']
+ assert res_template['Resources']['Reservations'] == resources[
+ 'Reservations'
+ ]
+
+ def _create_service_with_generic_resources(self, generic_resources):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+
+ resources = docker.types.Resources(
+ generic_resources=generic_resources
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ return resources, self.client.inspect_service(svc_id)
+
+ @requires_api_version('1.32')
+ def test_create_service_with_generic_resources(self):
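+        # Each case pairs an accepted input format with the GenericResources
+        # the engine should report; when 'expected' is omitted, the input is
+        # already in engine form.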
+ successful = [{
+ 'input': [
+ {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 1}},
+ {'NamedResourceSpec': {'Kind': 'gpu', 'Value': 'test'}}
+ ]}, {
+ 'input': {'gpu': 2, 'mpi': 'latest'},
+ 'expected': [
+ {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 2}},
+ {'NamedResourceSpec': {'Kind': 'mpi', 'Value': 'latest'}}
+ ]}
+ ]
+
+ for test in successful:
+ t = test['input']
+ resrcs, svc_info = self._create_service_with_generic_resources(t)
+
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ res_reservations = res_template['Resources']['Reservations']
+ assert res_reservations == resrcs['Reservations']
+ assert 'GenericResources' in res_reservations
+
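+            # Sort by resource kind so the comparison ignores ordering.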
+ def _key(d, specs=('DiscreteResourceSpec', 'NamedResourceSpec')):
+ return [d.get(s, {}).get('Kind', '') for s in specs]
+
+ actual = res_reservations['GenericResources']
+ expected = test.get('expected', test['input'])
+ assert sorted(actual, key=_key) == sorted(expected, key=_key)
+
+ @requires_api_version('1.32')
+ def test_create_service_with_invalid_generic_resources(self):
+ for test_input in ['1', 1.0, lambda: '1', {1, 2}]:
+ with pytest.raises(docker.errors.InvalidArgument):
+ self._create_service_with_generic_resources(test_input)
+
+ def test_create_service_with_update_config(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
+ @requires_api_version('1.25')
+ def test_create_service_with_update_config_monitor(self):
+ container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ monitor=300000000, max_failure_ratio=0.4
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Monitor'] == uc['Monitor']
+ assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
+
+ def test_create_service_with_restart_policy(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ policy = docker.types.RestartPolicy(
+ docker.types.RestartPolicy.condition_types.ANY,
+ delay=5, max_attempts=5
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, restart_policy=policy
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
+ assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
+
+ def test_create_service_with_custom_networks(self):
+ net1 = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[
+ 'dockerpytest_1', {'Target': 'dockerpytest_2'}
+ ]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert svc_info['Spec']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ def test_create_service_with_placement(self):
+ node_id = self.client.nodes()[0]['ID']
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=['node.id=={}'.format(node_id)]
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
+ {'Constraints': ['node.id=={}'.format(node_id)]})
+
+ def test_create_service_with_placement_object(self):
+ node_id = self.client.nodes()[0]['ID']
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(
+ constraints=['node.id=={}'.format(node_id)]
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.30')
+ def test_create_service_with_placement_platform(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.27')
+ def test_create_service_with_placement_preferences(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(preferences=[
+ {'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
+ ])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ def test_create_service_with_endpoint_spec(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, 'udp'),
+ 12562: (678,),
+ 53243: 8080,
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ @requires_api_version('1.32')
+ def test_create_service_with_endpoint_spec_host_publish_mode(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, None, 'host'),
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ assert len(ports) == 1
+ port = ports[0]
+ assert port['PublishedPort'] == 12357
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'tcp'
+ assert port['PublishMode'] == 'host'
+
+ def test_create_service_with_env(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Env' in con_spec
+ assert con_spec['Env'] == ['DOCKER_PY_TEST=1']
+
+ @requires_api_version('1.29')
+ def test_create_service_with_update_order(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, order='start-first'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['Order'] == uc['Order']
+
+ @requires_api_version('1.25')
+ def test_create_service_with_tty(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['true'], tty=True
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'TTY' in con_spec
+ assert con_spec['TTY'] is True
+
+ @requires_api_version('1.25')
+ def test_create_service_with_tty_dict(self):
+ container_spec = {
+ 'Image': BUSYBOX,
+ 'Command': ['true'],
+ 'TTY': True
+ }
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'TTY' in con_spec
+ assert con_spec['TTY'] is True
+
+ def test_create_service_global_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, mode='global'
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Global' in svc_info['Spec']['Mode']
+
+ def test_create_service_replicated_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name,
+ mode=docker.types.ServiceMode('replicated', 5)
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5}
+
+ @requires_api_version('1.25')
+ def test_update_service_force_update(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self.client.update_service(name, version_index, task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10
+
+ @requires_api_version('1.25')
+ def test_create_service_with_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = b'phantasmagoria of flower view'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ assert self.client.exec_start(exec_id) == secret_data
+
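The secret workflow tested here: create the secret, wrap its ID and name in a `SecretReference`, attach it to the `ContainerSpec`, and the engine mounts it read-only at `/run/secrets/<name>` inside each task. Condensed sketch (names are illustrative):

    secret_id = client.create_secret('my_secret', b'payload')
    ref = docker.types.SecretReference(secret_id, 'my_secret')
    spec = docker.types.ContainerSpec('busybox', ['sleep', '999'], secrets=[ref])
    client.create_service(docker.types.TaskTemplate(spec), name='svc')
    # inside a task: `cat /run/secrets/my_secret` prints b'payload'
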
+ @requires_api_version('1.25')
+ def test_create_service_with_unicode_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = u'東方花映塚'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ container_secret = self.client.exec_start(exec_id)
+ container_secret = container_secret.decode('utf-8')
+ assert container_secret == secret_data
+
+ @requires_api_version('1.30')
+ def test_create_service_with_config(self):
+ config_name = 'favorite_touhou'
+ config_data = b'phantasmagoria of flower view'
+ config_id = self.client.create_config(config_name, config_data)
+ self.tmp_configs.append(config_id)
+ config_ref = docker.types.ConfigReference(config_id, config_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], configs=[config_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
+ assert configs[0] == config_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /{0}'.format(config_name)
+ )
+ assert self.client.exec_start(exec_id) == config_data
+
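Configs mirror the secret flow, with one difference visible in the `exec_create` call above: when no explicit target is given, a config is mounted at the container root as `/<config-name>` rather than under `/run/secrets/`. Sketch (names are illustrative):

    config_id = client.create_config('my_config', b'payload')
    ref = docker.types.ConfigReference(config_id, 'my_config')
    spec = docker.types.ContainerSpec('busybox', ['sleep', '999'], configs=[ref])
    # inside a task: `cat /my_config` prints b'payload'
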
+ @requires_api_version('1.30')
+ def test_create_service_with_unicode_config(self):
+ config_name = 'favorite_touhou'
+ config_data = u'東方花映塚'
+ config_id = self.client.create_config(config_name, config_data)
+ self.tmp_configs.append(config_id)
+ config_ref = docker.types.ConfigReference(config_id, config_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], configs=[config_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
+ assert configs[0] == config_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /{0}'.format(config_name)
+ )
+ container_config = self.client.exec_start(exec_id)
+ container_config = container_config.decode('utf-8')
+ assert container_config == config_data
+
+ @requires_api_version('1.25')
+ def test_create_service_with_hosts(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], hosts={
+ 'foobar': '127.0.0.1',
+ 'baz': '8.8.8.8',
+ }
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Hosts' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ hosts = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hosts']
+ assert len(hosts) == 2
+ assert '127.0.0.1 foobar' in hosts
+ assert '8.8.8.8 baz' in hosts
+
+ @requires_api_version('1.25')
+ def test_create_service_with_hostname(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], hostname='foobar.baz.com'
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Hostname' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hostname'] ==
+ 'foobar.baz.com'
+ )
+
+ @requires_api_version('1.25')
+ def test_create_service_with_groups(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], groups=['shrinemaidens', 'youkais']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Groups' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ groups = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Groups']
+ assert len(groups) == 2
+ assert 'shrinemaidens' in groups
+ assert 'youkais' in groups
+
+ @requires_api_version('1.25')
+ def test_create_service_with_dns_config(self):
+ dns_config = docker.types.DNSConfig(
+ nameservers=['8.8.8.8', '8.8.4.4'],
+ search=['local'], options=['debug']
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], dns_config=dns_config
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'DNSConfig' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ dns_config ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig']
+ )
+
+ @requires_api_version('1.25')
+ def test_create_service_with_healthcheck(self):
+ second = 1000000000
+ hc = docker.types.Healthcheck(
+ test='true', retries=3, timeout=1 * second,
+ start_period=3 * second, interval=int(second / 2),
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck=hc
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ hc ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
+ )
+
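A unit detail worth calling out in the healthcheck tests: the swarm API expresses `interval`, `timeout`, and `start_period` in nanoseconds, hence the `second = 1000000000` constant. Sketch:

    SECOND = 10 ** 9  # the API takes durations in nanoseconds
    hc = docker.types.Healthcheck(
        test='true', retries=3,
        interval=SECOND // 2, timeout=1 * SECOND, start_period=3 * SECOND,
    )
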
+ @requires_api_version('1.28')
+ def test_create_service_with_readonly(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], read_only=True
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'ReadOnly' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert svc_info['Spec']['TaskTemplate']['ContainerSpec']['ReadOnly']
+
+ @requires_api_version('1.28')
+ def test_create_service_with_stop_signal(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], stop_signal='SIGINT'
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'StopSignal' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['StopSignal'] ==
+ 'SIGINT'
+ )
+
+ @requires_api_version('1.30')
+ def test_create_service_with_privileges(self):
+ priv = docker.types.Privileges(selinux_disable=True)
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], privileges=priv
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Privileges' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ privileges = (
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Privileges']
+ )
+ assert privileges['SELinuxContext']['Disable'] is True
+
+ @requires_api_version('1.25')
+ def test_update_service_with_defaults_name(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Name' in svc_info['Spec']
+ assert svc_info['Spec']['Name'] == name
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Name' in svc_info['Spec']
+ assert svc_info['Spec']['Name'] == name
+
+ @requires_api_version('1.25')
+ def test_update_service_with_defaults_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, name=name,
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+
+ def test_update_service_with_defaults_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name,
+ mode=docker.types.ServiceMode(mode='replicated', replicas=2)
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
+ assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
+ assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
+
+ def test_update_service_with_defaults_container_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={'container.label': 'SampleLabel'}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, new_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ newer_index = svc_info['Version']['Index']
+ assert newer_index > new_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+
+ def test_update_service_with_defaults_update_config(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
+ def test_update_service_with_defaults_networks(self):
+ net1 = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[
+ 'dockerpytest_1', {'Target': 'dockerpytest_2'}
+ ]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert svc_info['Spec']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ self._update_service(
+ svc_id, name, new_index, networks=[net1['Id']],
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Networks'] == [
+ {'Target': net1['Id']}
+ ]
+
+ def test_update_service_with_defaults_endpoint_spec(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, 'udp'),
+ 12562: (678,),
+ 53243: 8080,
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={'force': 'update'},
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ @requires_api_version('1.25')
+ def test_update_service_remove_healthcheck(self):
+ second = 1000000000
+ hc = docker.types.Healthcheck(
+ test='true', retries=3, timeout=1 * second,
+ start_period=3 * second, interval=int(second / 2),
+ )
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck=hc
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert (
+ 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ )
+ assert (
+ hc ==
+ svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
+ )
+
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['sleep', '999'], healthcheck={}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert (
+ 'Healthcheck' not in container_spec or
+ not container_spec['Healthcheck']
+ )
+
+ def test_update_service_remove_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Labels' in svc_info['Spec']
+ assert 'service.label' in svc_info['Spec']['Labels']
+ assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ self._update_service(
+ svc_id, name, version_index, labels={}, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert not svc_info['Spec'].get('Labels')
+
+ def test_update_service_remove_container_labels(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={'container.label': 'SampleLabel'}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
+ assert labels['container.label'] == 'SampleLabel'
+ version_index = svc_info['Version']['Index']
+
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello'],
+ labels={}
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert not container_spec.get('Labels')
+
+ @requires_api_version('1.29')
+ def test_update_service_with_network_change(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ net1 = self.client.create_network(
+ self.get_service_name(), driver='overlay',
+ ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ self.get_service_name(), driver='overlay',
+ ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[net1['Id']]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert len(svc_info['Spec']['Networks']) > 0
+ assert svc_info['Spec']['Networks'][0]['Target'] == net1['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ self._update_service(
+ svc_id, name, version_index, task_tmpl, name=name,
+ networks=[net2['Id']], fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net2['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+
+ self._update_service(
+ svc_id, name, new_index, name=name, networks=[net1['Id']],
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'ContainerSpec' in task_template
+ new_spec = task_template['ContainerSpec']
+ assert 'Image' in new_spec
+ assert new_spec['Image'].split(':')[0] == 'busybox'
+ assert 'Command' in new_spec
+ assert new_spec['Command'] == ['echo', 'hello']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net1['Id']
+
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, networks=[net2['Id']]
+ )
+ self._update_service(
+ svc_id, name, new_index, task_tmpl, name=name,
+ fetch_current_spec=True
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ task_template = svc_info['Spec']['TaskTemplate']
+ assert 'Networks' in task_template
+ assert len(task_template['Networks']) > 0
+ assert task_template['Networks'][0]['Target'] == net2['Id']
+
+ def _update_service(self, svc_id, *args, **kwargs):
+ # service update tests seem to be a bit flaky
+ # give them a chance to retry the update with a new version index
+ try:
+ self.client.update_service(*args, **kwargs)
+ except docker.errors.APIError as e:
+ if e.explanation.endswith("update out of sequence"):
+ svc_info = self.client.inspect_service(svc_id)
+ version_index = svc_info['Version']['Index']
+
+ if len(args) > 1:
+ args = (args[0], version_index) + args[2:]
+ else:
+ kwargs['version'] = version_index
+
+ self.client.update_service(*args, **kwargs)
+ else:
+ raise
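The helper above works around swarm's optimistic concurrency control: every update must carry the service's current `Version.Index`, and a concurrent change makes the engine reject the call with an "update out of sequence" error. The same retry pattern, sketched outside the test harness:

    def safe_update(client, service_id, **kwargs):
        version = client.inspect_service(service_id)['Version']['Index']
        try:
            client.update_service(service_id, version, **kwargs)
        except docker.errors.APIError as e:
            if not e.explanation.endswith('update out of sequence'):
                raise
            # refresh the version index and retry once
            version = client.inspect_service(service_id)['Version']['Index']
            client.update_service(service_id, version, **kwargs)
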
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
new file mode 100644
index 0000000..dbf3786
--- /dev/null
+++ b/tests/integration/api_swarm_test.py
@@ -0,0 +1,207 @@
+import copy
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+class SwarmTest(BaseAPIIntegrationTest):
+ def setUp(self):
+ super(SwarmTest, self).setUp()
+ force_leave_swarm(self.client)
+ self._unlock_key = None
+
+ def tearDown(self):
+ super(SwarmTest, self).tearDown()
+ try:
+ if self._unlock_key:
+ self.client.unlock_swarm(self._unlock_key)
+ except docker.errors.APIError:
+ pass
+
+ force_leave_swarm(self.client)
+
+ @requires_api_version('1.24')
+ def test_init_swarm_simple(self):
+ assert self.init_swarm()
+
+ @requires_api_version('1.24')
+ def test_init_swarm_force_new_cluster(self):
+ pytest.skip('Test stalls the engine on 1.12.0')
+
+ assert self.init_swarm()
+ version_1 = self.client.inspect_swarm()['Version']['Index']
+ assert self.client.init_swarm(force_new_cluster=True)
+ version_2 = self.client.inspect_swarm()['Version']['Index']
+ assert version_2 != version_1
+
+ @requires_api_version('1.24')
+ def test_init_already_in_cluster(self):
+ assert self.init_swarm()
+ with pytest.raises(docker.errors.APIError):
+ self.init_swarm()
+
+ @requires_api_version('1.24')
+ def test_init_swarm_custom_raft_spec(self):
+ spec = self.client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
+ assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+
+ @requires_api_version('1.30')
+ def test_init_swarm_with_ca_config(self):
+ spec = self.client.create_swarm_spec(
+ node_cert_expiry=7776000000000000, ca_force_rotate=6000000000000
+ )
+
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+ assert swarm_info['Spec']['CAConfig']['NodeCertExpiry'] == (
+ spec['CAConfig']['NodeCertExpiry']
+ )
+ assert swarm_info['Spec']['CAConfig']['ForceRotate'] == (
+ spec['CAConfig']['ForceRotate']
+ )
+
+ @requires_api_version('1.25')
+ def test_init_swarm_with_autolock_managers(self):
+ spec = self.client.create_swarm_spec(autolock_managers=True)
+ assert self.init_swarm(swarm_spec=spec)
+ # save unlock key for tearDown
+ self._unlock_key = self.client.get_unlock_key()
+ swarm_info = self.client.inspect_swarm()
+
+ assert (
+ swarm_info['Spec']['EncryptionConfig']['AutoLockManagers'] is True
+ )
+
+ assert self._unlock_key.get('UnlockKey')
+
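Context for the autolock test: once `autolock_managers` is enabled, a restarted manager stays locked until it is given the unlock key, which is why the key is stashed for `tearDown`. The relevant calls, in a minimal sketch:

    spec = client.create_swarm_spec(autolock_managers=True)
    client.init_swarm('127.0.0.1', swarm_spec=spec)
    key = client.get_unlock_key()   # {'UnlockKey': 'SWMKEY-1-...'}
    client.unlock_swarm(key)        # required after a manager restart
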
+ @requires_api_version('1.25')
+ @pytest.mark.xfail(
+ reason="This doesn't seem to be taken into account by the engine"
+ )
+ def test_init_swarm_with_log_driver(self):
+ spec = {'TaskDefaults': {'LogDriver': {'Name': 'syslog'}}}
+ assert self.init_swarm(swarm_spec=spec)
+ swarm_info = self.client.inspect_swarm()
+
+ assert swarm_info['Spec']['TaskDefaults']['LogDriver']['Name'] == (
+ 'syslog'
+ )
+
+ @requires_api_version('1.24')
+ def test_leave_swarm(self):
+ assert self.init_swarm()
+ with pytest.raises(docker.errors.APIError) as exc_info:
+ self.client.leave_swarm()
+        assert exc_info.value.response.status_code == 500
+ assert self.client.leave_swarm(force=True)
+ with pytest.raises(docker.errors.APIError) as exc_info:
+ self.client.inspect_swarm()
+        assert exc_info.value.response.status_code == 406
+ assert self.client.leave_swarm(force=True)
+
+ @requires_api_version('1.24')
+ def test_update_swarm(self):
+ assert self.init_swarm()
+ swarm_info_1 = self.client.inspect_swarm()
+ spec = self.client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200,
+ node_cert_expiry=7776000000000000
+ )
+ assert self.client.update_swarm(
+ version=swarm_info_1['Version']['Index'],
+ swarm_spec=spec, rotate_worker_token=True
+ )
+ swarm_info_2 = self.client.inspect_swarm()
+
+ assert (
+ swarm_info_1['Version']['Index'] !=
+ swarm_info_2['Version']['Index']
+ )
+ assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
+ assert (
+ swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
+ )
+ assert (
+ swarm_info_1['JoinTokens']['Manager'] ==
+ swarm_info_2['JoinTokens']['Manager']
+ )
+ assert (
+ swarm_info_1['JoinTokens']['Worker'] !=
+ swarm_info_2['JoinTokens']['Worker']
+ )
+
+ @requires_api_version('1.24')
+ def test_list_nodes(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ assert len(nodes_list) == 1
+ node = nodes_list[0]
+ assert 'ID' in node
+ assert 'Spec' in node
+ assert node['Spec']['Role'] == 'manager'
+
+ filtered_list = self.client.nodes(filters={
+ 'id': node['ID']
+ })
+ assert len(filtered_list) == 1
+ filtered_list = self.client.nodes(filters={
+ 'role': 'worker'
+ })
+ assert len(filtered_list) == 0
+
+ @requires_api_version('1.24')
+ def test_inspect_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ assert len(nodes_list) == 1
+ node = nodes_list[0]
+ node_data = self.client.inspect_node(node['ID'])
+ assert node['ID'] == node_data['ID']
+ assert node['Version'] == node_data['Version']
+
+ @requires_api_version('1.24')
+ def test_update_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ node = nodes_list[0]
+ orig_spec = node['Spec']
+
+ # add a new label
+ new_spec = copy.deepcopy(orig_spec)
+ new_spec['Labels'] = {'new.label': 'new value'}
+ self.client.update_node(node_id=node['ID'],
+ version=node['Version']['Index'],
+ node_spec=new_spec)
+ updated_node = self.client.inspect_node(node['ID'])
+ assert new_spec == updated_node['Spec']
+
+ # Revert the changes
+ self.client.update_node(node_id=node['ID'],
+ version=updated_node['Version']['Index'],
+ node_spec=orig_spec)
+ reverted_node = self.client.inspect_node(node['ID'])
+ assert orig_spec == reverted_node['Spec']
+
+ @requires_api_version('1.24')
+ def test_remove_main_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ node_id = nodes_list[0]['ID']
+ with pytest.raises(docker.errors.NotFound):
+ self.client.remove_node('foobar01')
+ with pytest.raises(docker.errors.APIError) as e:
+ self.client.remove_node(node_id)
+
+ assert e.value.response.status_code >= 400
+
+ with pytest.raises(docker.errors.APIError) as e:
+ self.client.remove_node(node_id, True)
+
+ assert e.value.response.status_code >= 400
diff --git a/tests/integration/api_volume_test.py b/tests/integration/api_volume_test.py
new file mode 100644
index 0000000..8e7dd3a
--- /dev/null
+++ b/tests/integration/api_volume_test.py
@@ -0,0 +1,69 @@
+import docker
+import pytest
+
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+class TestVolumes(BaseAPIIntegrationTest):
+ def test_create_volume(self):
+ name = 'perfectcherryblossom'
+ self.tmp_volumes.append(name)
+ result = self.client.create_volume(name)
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
+
+ def test_create_volume_invalid_driver(self):
+ driver_name = 'invalid.driver'
+
+ with pytest.raises(docker.errors.NotFound):
+ self.client.create_volume('perfectcherryblossom', driver_name)
+
+ def test_list_volumes(self):
+ name = 'imperishablenight'
+ self.tmp_volumes.append(name)
+ volume_info = self.client.create_volume(name)
+ result = self.client.volumes()
+ assert 'Volumes' in result
+ volumes = result['Volumes']
+ assert volume_info in volumes
+
+ def test_inspect_volume(self):
+ name = 'embodimentofscarletdevil'
+ self.tmp_volumes.append(name)
+ volume_info = self.client.create_volume(name)
+ result = self.client.inspect_volume(name)
+ assert volume_info == result
+
+ def test_inspect_nonexistent_volume(self):
+ name = 'embodimentofscarletdevil'
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_volume(name)
+
+ def test_remove_volume(self):
+ name = 'shootthebullet'
+ self.tmp_volumes.append(name)
+ self.client.create_volume(name)
+ self.client.remove_volume(name)
+
+ @requires_api_version('1.25')
+ def test_force_remove_volume(self):
+ name = 'shootthebullet'
+ self.tmp_volumes.append(name)
+ self.client.create_volume(name)
+ self.client.remove_volume(name, force=True)
+
+ @requires_api_version('1.25')
+ def test_prune_volumes(self):
+ name = 'hopelessmasquerade'
+ self.client.create_volume(name)
+ self.tmp_volumes.append(name)
+ result = self.client.prune_volumes()
+ assert name in result['VolumesDeleted']
+
+ def test_remove_nonexistent_volume(self):
+ name = 'shootthebullet'
+ with pytest.raises(docker.errors.NotFound):
+ self.client.remove_volume(name)
diff --git a/tests/integration/base.py b/tests/integration/base.py
new file mode 100644
index 0000000..56c23ed
--- /dev/null
+++ b/tests/integration/base.py
@@ -0,0 +1,125 @@
+import os
+import shutil
+import unittest
+
+import docker
+from docker.utils import kwargs_from_env
+
+from .. import helpers
+
+BUSYBOX = 'busybox:buildroot-2014.02'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
+
+
+class BaseIntegrationTest(unittest.TestCase):
+ """
+ A base class for integration test cases. It cleans up the Docker server
+ after itself.
+ """
+
+ def setUp(self):
+ self.tmp_imgs = []
+ self.tmp_containers = []
+ self.tmp_folders = []
+ self.tmp_volumes = []
+ self.tmp_networks = []
+ self.tmp_plugins = []
+ self.tmp_secrets = []
+ self.tmp_configs = []
+
+ def tearDown(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True, v=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for config in self.tmp_configs:
+ try:
+ client.api.remove_config(config)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+
+
+class BaseAPIIntegrationTest(BaseIntegrationTest):
+ """
+ A test case for `APIClient` integration tests. It sets up an `APIClient`
+ as `self.client`.
+ """
+
+ def setUp(self):
+ super(BaseAPIIntegrationTest, self).setUp()
+ self.client = self.get_client_instance()
+
+ def tearDown(self):
+ super(BaseAPIIntegrationTest, self).tearDown()
+ self.client.close()
+
+ @staticmethod
+ def get_client_instance():
+ return docker.APIClient(
+ version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
+ )
+
+ @staticmethod
+ def _init_swarm(client, **kwargs):
+ return client.init_swarm(
+ '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
+ )
+
+ def run_container(self, *args, **kwargs):
+ container = self.client.create_container(*args, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)['StatusCode']
+
+ if exitcode != 0:
+ output = self.client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}"
+ .format(exitcode, output))
+
+ return container
+
+ def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
+ container = self.client.create_container(
+ image=image, command=command, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ return container
+
+ def execute(self, container, cmd, exit_code=0, **kwargs):
+ exc = self.client.exec_create(container, cmd, **kwargs)
+ output = self.client.exec_start(exc)
+ actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
+ msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
+ " ".join(cmd), exit_code, actual_exit_code, output)
+ assert actual_exit_code == exit_code, msg
+
+ def init_swarm(self, **kwargs):
+ return self._init_swarm(self.client, **kwargs)
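The cleanup pattern these base classes implement: a test appends every resource it creates to the matching `tmp_*` list, and `tearDown` removes them best-effort, swallowing `APIError` so one failed cleanup cannot cascade into other tests. In use (sketch):

    class MyTest(BaseAPIIntegrationTest):
        def test_something(self):
            container = self.client.create_container(BUSYBOX, ['true'])
            self.tmp_containers.append(container)  # guarantees teardown
            self.client.start(container)
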
diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py
new file mode 100644
index 0000000..7df172c
--- /dev/null
+++ b/tests/integration/client_test.py
@@ -0,0 +1,49 @@
+import threading
+import unittest
+
+import docker
+
+from datetime import datetime, timedelta
+
+from ..helpers import requires_api_version
+from .base import TEST_API_VERSION
+
+
+class ClientTest(unittest.TestCase):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ def test_info(self):
+ info = self.client.info()
+ assert 'ID' in info
+ assert 'Name' in info
+
+ def test_ping(self):
+ assert self.client.ping() is True
+
+ def test_version(self):
+ assert 'Version' in self.client.version()
+
+ @requires_api_version('1.25')
+ def test_df(self):
+ data = self.client.df()
+ assert 'LayersSize' in data
+ assert 'Containers' in data
+ assert 'Volumes' in data
+ assert 'Images' in data
+
+
+class CancellableEventsTest(unittest.TestCase):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ def test_cancel_events(self):
+ start = datetime.now()
+
+ events = self.client.events(until=start + timedelta(seconds=5))
+
+ cancel_thread = threading.Timer(2, events.close)
+ cancel_thread.start()
+
+ for _ in events:
+ pass
+
+ self.assertLess(datetime.now() - start, timedelta(seconds=3))
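The cancellation test depends on `client.events()` returning a streaming generator backed by a live HTTP response; calling its `close()` method from another thread unblocks the consuming loop early. Standalone sketch of the same pattern:

    import threading
    events = client.events(decode=True)       # blocking generator
    threading.Timer(2, events.close).start()  # stop streaming after 2s
    for event in events:                      # loop exits once close() fires
        print(event.get('Action'))
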
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
new file mode 100644
index 0000000..4e8d268
--- /dev/null
+++ b/tests/integration/conftest.py
@@ -0,0 +1,29 @@
+from __future__ import print_function
+
+import sys
+import warnings
+
+import docker.errors
+from docker.utils import kwargs_from_env
+import pytest
+
+from .base import BUSYBOX
+
+
+@pytest.fixture(autouse=True, scope='session')
+def setup_test_session():
+ warnings.simplefilter('error')
+ c = docker.APIClient(version='auto', **kwargs_from_env())
+ try:
+ c.inspect_image(BUSYBOX)
+ except docker.errors.NotFound:
+ print("\npulling {0}".format(BUSYBOX), file=sys.stderr)
+ for data in c.pull(BUSYBOX, stream=True, decode=True):
+ status = data.get("status")
+ progress = data.get("progress")
+ detail = "{0} - {1}".format(status, progress)
+ print(detail, file=sys.stderr)
+
+    # Make doubly sure the busybox image is now present
+ c.inspect_image(BUSYBOX)
+ c.close()
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
new file mode 100644
index 0000000..ac74d72
--- /dev/null
+++ b/tests/integration/errors_test.py
@@ -0,0 +1,15 @@
+from docker.errors import APIError
+from .base import BaseAPIIntegrationTest, BUSYBOX
+import pytest
+
+
+class ErrorsTest(BaseAPIIntegrationTest):
+ def test_api_error_parses_json(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '10'])
+ self.client.start(container['Id'])
+ with pytest.raises(APIError) as cm:
+ self.client.remove_container(container['Id'])
+ explanation = cm.value.explanation
+ assert 'You cannot remove a running container' in explanation
+ assert '{"message":' not in explanation
+ self.client.remove_container(container['Id'], force=True)
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
new file mode 100644
index 0000000..ab41ea5
--- /dev/null
+++ b/tests/integration/models_containers_test.py
@@ -0,0 +1,364 @@
+import tempfile
+import threading
+
+import docker
+import pytest
+from .base import BaseIntegrationTest, TEST_API_VERSION
+from ..helpers import random_name, requires_api_version
+
+
+class ContainerCollectionTest(BaseIntegrationTest):
+
+ def test_run(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ assert client.containers.run(
+ "alpine", "echo hello world", remove=True
+ ) == b'hello world\n'
+
+ def test_run_detach(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['Config']['Image'] == "alpine"
+ assert container.attrs['Config']['Cmd'] == ['sleep', '300']
+
+ def test_run_with_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ContainerError) as cm:
+ client.containers.run("alpine", "cat /test", remove=True)
+ assert cm.value.exit_status == 1
+ assert "cat /test" in cm.exconly()
+ assert "alpine" in cm.exconly()
+ assert "No such file or directory" in cm.exconly()
+
+ def test_run_with_image_that_does_not_exist(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ImageNotFound):
+ client.containers.run("dockerpytest_does_not_exist")
+
+ @pytest.mark.skipif(
+ docker.constants.IS_WINDOWS_PLATFORM, reason="host mounts on Windows"
+ )
+ def test_run_with_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ path = tempfile.mkdtemp()
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["%s:/insidecontainer" % path],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ name = "container_volume_test"
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["%s:/insidecontainer" % path],
+ name=name
+ )
+ self.tmp_containers.append(name)
+ assert out == b'hello\n'
+
+ def test_run_with_named_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create(name="somevolume")
+ self.tmp_volumes.append(volume.id)
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["somevolume:/insidecontainer"],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ name = "container_volume_test"
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["somevolume:/insidecontainer"],
+ name=name
+ )
+ self.tmp_containers.append(name)
+ assert out == b'hello\n'
+
+ def test_run_with_network(self):
+ net_name = random_name()
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.networks.create(net_name)
+ self.tmp_networks.append(net_name)
+
+ container = client.containers.run(
+ 'alpine', 'echo hello world', network=net_name,
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+
+ attrs = container.attrs
+
+ assert 'NetworkSettings' in attrs
+ assert 'Networks' in attrs['NetworkSettings']
+ assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
+
+ def test_run_with_none_driver(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ out = client.containers.run(
+ "alpine", "echo hello",
+ log_config=dict(type='none')
+ )
+ assert out is None
+
+ def test_run_with_json_file_driver(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ out = client.containers.run(
+ "alpine", "echo hello",
+ log_config=dict(type='json-file')
+ )
+ assert out == b'hello\n'
+
+ @requires_api_version('1.25')
+ def test_run_with_auto_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'echo hello', auto_remove=True
+ )
+ assert out == b'hello\n'
+
+ @requires_api_version('1.25')
+ def test_run_with_auto_remove_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ContainerError) as e:
+ client.containers.run(
+ 'alpine', 'sh -c ">&2 echo error && exit 1"', auto_remove=True
+ )
+ assert e.value.exit_status == 1
+ assert e.value.stderr is None
+
+ def test_run_with_streamed_logs(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'sh -c "echo hello && echo world"', stream=True
+ )
+ logs = [line for line in out]
+ assert logs[0] == b'hello\n'
+ assert logs[1] == b'world\n'
+
+ @pytest.mark.timeout(5)
+ def test_run_with_streamed_logs_and_cancel(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ out = client.containers.run(
+ 'alpine', 'sh -c "echo hello && echo world"', stream=True
+ )
+
+ threading.Timer(1, out.close).start()
+
+ logs = [line for line in out]
+
+ assert len(logs) == 2
+ assert logs[0] == b'hello\n'
+ assert logs[1] == b'world\n'
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert client.containers.get(container.id).attrs[
+ 'Config']['Image'] == "alpine"
+
+ def test_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list() if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Config']['Image'] == 'alpine'
+ assert container.status == 'running'
+ assert container.image == client.images.get('alpine')
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+ def test_list_sparse(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list(sparse=True) if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Image'] == 'alpine'
+ assert container.status == 'running'
+ assert container.image == client.images.get('alpine')
+ with pytest.raises(docker.errors.DockerException):
+ container.labels
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+
+class ContainerTest(BaseIntegrationTest):
+
+ def test_commit(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test'",
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+ image = container.commit()
+ assert client.containers.run(
+ image.id, "cat /test", remove=True
+ ) == b"hello\n"
+
+ def test_diff(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "touch /test", detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.diff() == [{'Path': '/test', 'Kind': 1}]
+
+ def test_exec_run_success(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ exec_output = container.exec_run("cat /test")
+ assert exec_output[0] == 0
+ assert exec_output[1] == b"hello\n"
+
+ def test_exec_run_failed(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ exec_output = container.exec_run("docker ps")
+ assert exec_output[0] == 126
+
+ def test_kill(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ while container.status != 'running':
+ container.reload()
+ assert container.status == 'running'
+ container.kill()
+ container.reload()
+ assert container.status == 'exited'
+
+ def test_logs(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello world",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.logs() == b"hello world\n"
+
+ def test_pause(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ container.pause()
+ container.reload()
+ assert container.status == "paused"
+ container.unpause()
+ container.reload()
+ assert container.status == "running"
+
+ def test_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.id in [c.id for c in client.containers.list(all=True)]
+ container.wait()
+ container.remove()
+ containers = client.containers.list(all=True)
+ assert container.id not in [c.id for c in containers]
+
+ def test_rename(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello", name="test1",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.name == "test1"
+ container.rename("test2")
+ container.reload()
+ assert container.name == "test2"
+
+ def test_restart(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.restart()
+ container.reload()
+ second_started_at = container.attrs['State']['StartedAt']
+ assert first_started_at != second_started_at
+
+ def test_start(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.create("alpine", "sleep 50", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status == "created"
+ container.start()
+ container.reload()
+ assert container.status == "running"
+
+ def test_stats(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ stats = container.stats(stream=False)
+ for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
+ 'memory_stats', 'blkio_stats']:
+ assert key in stats
+
+ def test_stop(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "top", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status in ("running", "created")
+ container.stop(timeout=2)
+ container.reload()
+ assert container.status == "exited"
+
+ def test_top(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 60", detach=True)
+ self.tmp_containers.append(container.id)
+ top = container.top()
+ assert len(top['Processes']) == 1
+ assert 'sleep 60' in top['Processes'][0]
+
+ def test_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 60", detach=True,
+ cpu_shares=2)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['HostConfig']['CpuShares'] == 2
+ container.update(cpu_shares=3)
+ container.reload()
+ assert container.attrs['HostConfig']['CpuShares'] == 3
+
+ def test_wait(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sh -c 'exit 0'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait()['StatusCode'] == 0
+ container = client.containers.run("alpine", "sh -c 'exit 1'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait()['StatusCode'] == 1
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
new file mode 100644
index 0000000..ae735ba
--- /dev/null
+++ b/tests/integration/models_images_test.py
@@ -0,0 +1,136 @@
+import io
+import tempfile
+
+import docker
+import pytest
+
+from .base import BaseIntegrationTest, BUSYBOX, TEST_API_VERSION
+
+
+class ImageCollectionTest(BaseIntegrationTest):
+
+ def test_build(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image, _ = client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ ))
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ # @pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
+ def test_build_with_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.BuildError) as cm:
+ client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "RUN exit 1".encode('ascii')
+ ))
+ assert (
+ "The command '/bin/sh -c exit 1' returned a non-zero code: 1"
+ ) in cm.exconly()
+ assert cm.value.build_log
+
+ def test_build_with_multiple_success(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image, _ = client.images.build(
+ tag='some-tag', fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ )
+ )
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ def test_build_with_success_build_output(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image, _ = client.images.build(
+ tag='dup-txt-tag', fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo Successfully built abcd1234".encode('ascii')
+ )
+ )
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"Successfully built abcd1234\n"
+
+ def test_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list())
+
+ def test_list_with_repository(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list('alpine'))
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+ def test_pull(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert 'alpine:latest' in image.attrs['RepoTags']
+
+ def test_pull_with_tag(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine', tag='3.3')
+ assert 'alpine:3.3' in image.attrs['RepoTags']
+
+ def test_pull_with_sha(self):
+ image_ref = (
+ 'hello-world@sha256:083de497cff944f969d8499ab94f07134c50bcf5e6b95'
+ '59b27182d3fa80ce3f7'
+ )
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull(image_ref)
+ assert image_ref in image.attrs['RepoDigests']
+
+ def test_pull_multiple(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ images = client.images.pull('hello-world')
+ assert len(images) == 1
+ assert 'hello-world:latest' in images[0].attrs['RepoTags']
+
+ def test_load_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with pytest.raises(docker.errors.ImageLoadError):
+ client.images.load('abc')
+
+ def test_save_and_load(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.get(BUSYBOX)
+ with tempfile.TemporaryFile() as f:
+ stream = image.save()
+ for chunk in stream:
+ f.write(chunk)
+
+ f.seek(0)
+ result = client.images.load(f.read())
+
+ assert len(result) == 1
+ assert result[0].id == image.id
+
+
+class ImageTest(BaseIntegrationTest):
+
+ def test_tag_and_remove(self):
+ repo = 'dockersdk.tests.images.test_tag'
+ tag = 'some-tag'
+ identifier = '{}:{}'.format(repo, tag)
+
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+
+ result = image.tag(repo, tag)
+ assert result is True
+ self.tmp_imgs.append(identifier)
+ assert image.id in get_ids(client.images.list(repo))
+ assert image.id in get_ids(client.images.list(identifier))
+
+ client.images.remove(identifier)
+ assert image.id not in get_ids(client.images.list(repo))
+ assert image.id not in get_ids(client.images.list(identifier))
+
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+
+def get_ids(images):
+ return [i.id for i in images]
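+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# test_save_and_load streams an image through a temp file; two helpers that
+# capture the same round trip, assuming only the save()/load() behavior
+# exercised above:
+def save_image_to_file(image, path):
+ with open(path, 'wb') as f:
+ for chunk in image.save():
+ f.write(chunk)
+
+
+def load_image_from_file(client, path):
+ with open(path, 'rb') as f:
+ return client.images.load(f.read())[0]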
diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py
new file mode 100644
index 0000000..08d7ad2
--- /dev/null
+++ b/tests/integration/models_networks_test.py
@@ -0,0 +1,70 @@
+import docker
+from .. import helpers
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class NetworkCollectionTest(BaseIntegrationTest):
+
+ def test_create(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(name, labels={'foo': 'bar'})
+ self.tmp_networks.append(network.id)
+ assert network.name == name
+ assert network.attrs['Labels']['foo'] == "bar"
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network_id = client.networks.create(name).id
+ self.tmp_networks.append(network_id)
+ network = client.networks.get(network_id)
+ assert network.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(name)
+ self.tmp_networks.append(network.id)
+ assert network.id in [n.id for n in client.networks.list()]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(ids=["fdhjklfdfdshjkfds"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(ids=[network.id])
+ ]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(names=["fdshjklfdsjhkl"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(names=[name])
+ ]
+ network.remove()
+ assert network.id not in [n.id for n in client.networks.list()]
+
+
+class NetworkTest(BaseIntegrationTest):
+
+ def test_connect_disconnect(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ network = client.networks.create(helpers.random_name())
+ self.tmp_networks.append(network.id)
+ container = client.containers.create("alpine", "sleep 300")
+ self.tmp_containers.append(container.id)
+ assert network.containers == []
+ network.connect(container)
+ container.start()
+ assert client.networks.get(network.id).containers == [container]
+ network_containers = [
+ c
+ for net in client.networks.list(ids=[network.id], greedy=True)
+ for c in net.containers
+ ]
+ assert network_containers == [container]
+ network.disconnect(container)
+ assert network.containers == []
+ assert client.networks.get(network.id).containers == []
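+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# connect() forwards endpoint options to the low-level API; a sketch,
+# assuming the `aliases` keyword documented for
+# connect_container_to_network():
+def connect_with_alias(client, network, container, alias):
+ network.connect(container, aliases=[alias])
+ # Other containers on the network can now resolve `alias` by name.
+ return client.networks.get(network.id).containers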
diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py
new file mode 100644
index 0000000..3c8d48a
--- /dev/null
+++ b/tests/integration/models_nodes_test.py
@@ -0,0 +1,37 @@
+import unittest
+
+import docker
+
+from .. import helpers
+from .base import TEST_API_VERSION
+
+
+class NodesTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_list_get_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
+ nodes = client.nodes.list()
+ assert len(nodes) == 1
+ assert nodes[0].attrs['Spec']['Role'] == 'manager'
+
+ node = client.nodes.get(nodes[0].id)
+ assert node.id == nodes[0].id
+ assert node.attrs['Spec']['Role'] == 'manager'
+ assert node.version > 0
+
+ node = client.nodes.list()[0]
+ assert not node.attrs['Spec'].get('Labels')
+ node.update({
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ })
+ node.reload()
+ assert node.attrs['Spec']['Labels'] == {'foo': 'bar'}
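+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# update() replaces the entire node spec (note the full dict sent above), so
+# callers normally start from the current spec. A sketch, assuming
+# attrs['Spec'] round-trips unchanged through update():
+def add_node_label(node, key, value):
+ spec = dict(node.attrs['Spec'])
+ labels = dict(spec.get('Labels') or {})
+ labels[key] = value
+ spec['Labels'] = labels
+ node.update(spec)
+ node.reload()
+ return node.attrs['Spec']['Labels']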
diff --git a/tests/integration/models_resources_test.py b/tests/integration/models_resources_test.py
new file mode 100644
index 0000000..4aafe0c
--- /dev/null
+++ b/tests/integration/models_resources_test.py
@@ -0,0 +1,16 @@
+import docker
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class ModelTest(BaseIntegrationTest):
+
+ def test_reload(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.kill()
+ container.start()
+ assert container.attrs['State']['StartedAt'] == first_started_at
+ container.reload()
+ assert container.attrs['State']['StartedAt'] != first_started_at
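+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# attrs is a cached snapshot and reload() refetches it, which is exactly what
+# test_reload demonstrates. A bounded polling sketch built on the same two
+# calls:
+def wait_for_state(container, state, attempts=50):
+ for _ in range(attempts):
+ container.reload()
+ if container.attrs['State']['Status'] == state:
+ return True
+ return False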
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
new file mode 100644
index 0000000..36caa85
--- /dev/null
+++ b/tests/integration/models_services_test.py
@@ -0,0 +1,335 @@
+import unittest
+
+import docker
+import pytest
+
+from .. import helpers
+from .base import TEST_API_VERSION
+from docker.errors import InvalidArgument
+from docker.types.services import ServiceMode
+
+
+class ServiceTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ client = docker.from_env(version=TEST_API_VERSION)
+ helpers.force_leave_swarm(client)
+ client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
+
+ @classmethod
+ def tearDownClass(cls):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_create(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ # create arguments
+ name=name,
+ labels={'foo': 'bar'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'}
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+
+ def test_create_with_network(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(
+ helpers.random_name(), driver='overlay'
+ )
+ service = client.services.create(
+ # create arguments
+ name=name,
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ networks=[network.id]
+ )
+ assert 'Networks' in service.attrs['Spec']['TaskTemplate']
+ networks = service.attrs['Spec']['TaskTemplate']['Networks']
+ assert len(networks) == 1
+ assert networks[0]['Target'] == network.id
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ image="alpine",
+ command="sleep 300"
+ )
+ service = client.services.get(service.id)
+ assert service.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ assert service in client.services.list()
+ service.remove()
+ assert service not in client.services.list()
+
+ def test_tasks(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service1 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ service2 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service1.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service1.id
+
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service2.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service2.id
+
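+ # --- Editor's sketch (not part of the upstream suite) ------------------
+ # The bare `while len(tasks) == 0` loops above spin forever if the
+ # scheduler stalls; a bounded variant using only tasks(), as exercised
+ # in test_tasks:
+ @staticmethod
+ def poll_tasks(service, minimum=1, attempts=100):
+ tasks = service.tasks()
+ while len(tasks) < minimum and attempts > 0:
+ attempts -= 1
+ tasks = service.tasks()
+ return tasks
+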
+ def test_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Command'] == ["sleep", "600"]
+
+ def test_update_retains_service_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ labels={'service.label': 'SampleLabel'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ labels = service.attrs['Spec']['Labels']
+ assert labels == {'service.label': 'SampleLabel'}
+
+ def test_update_retains_container_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container.label': 'SampleLabel'}
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Labels'] == {'container.label': 'SampleLabel'}
+
+ def test_update_remove_service_labels(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ labels={'service.label': 'SampleLabel'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ labels={},
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert not service.attrs['Spec'].get('Labels')
+
+ @pytest.mark.xfail(reason='Flaky test')
+ def test_update_retains_networks(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ network_name = helpers.random_name()
+ network = client.networks.create(
+ network_name, driver='overlay'
+ )
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ networks=[network.id],
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.reload()
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ networks = service.attrs['Spec']['TaskTemplate']['Networks']
+ assert networks == [{'Target': network.id}]
+
+ def test_scale_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ service.update(
+ mode=docker.types.ServiceMode('replicated', replicas=2),
+ )
+ while len(tasks) == 1:
+ tasks = service.tasks()
+ assert len(tasks) >= 2
+ # check that the container spec is not overridden with None
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ def test_scale_method_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ service.scale(2)
+ while len(tasks) == 1:
+ tasks = service.tasks()
+ assert len(tasks) >= 2
+ # check that the container spec is not overridden with None
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ def test_scale_method_global_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ mode = ServiceMode('global')
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300",
+ mode=mode
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service.tasks()
+ assert len(tasks) == 1
+ with pytest.raises(InvalidArgument):
+ service.scale(2)
+
+ assert len(tasks) == 1
+ service.reload()
+ spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert spec.get('Command') == ['sleep', '300']
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.update(
+ # create argument
+ name=service.name,
+ # task template argument
+ force_update=10,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.version > initial_version
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service_using_bool(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.update(
+ # create argument
+ name=service.name,
+ # task template argument
+ force_update=True,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.version > initial_version
+
+ @helpers.requires_api_version('1.25')
+ def test_force_update_service_using_shorthand_method(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ initial_version = service.version
+ assert service.force_update()
+ service.reload()
+ assert service.version > initial_version
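+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# The three force-update tests assert the same observable effect: the Swarm
+# object version grows. A condensed sketch of that check, assuming only the
+# force_update()/version behavior exercised above:
+def bump_service(service):
+ before = service.version
+ service.force_update()
+ service.reload()
+ assert service.version > before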
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
new file mode 100644
index 0000000..f39f0d3
--- /dev/null
+++ b/tests/integration/models_swarm_test.py
@@ -0,0 +1,33 @@
+import unittest
+
+import docker
+
+from .. import helpers
+from .base import TEST_API_VERSION
+import pytest
+
+
+class SwarmTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_init_update_leave(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init(
+ advertise_addr='127.0.0.1', snapshot_interval=5000,
+ listen_addr=helpers.swarm_listen_addr()
+ )
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
+ client.swarm.update(snapshot_interval=10000)
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
+ assert client.swarm.id
+ assert client.swarm.leave(force=True)
+ with pytest.raises(docker.errors.APIError) as cm:
+ client.swarm.reload()
+ assert (
+ cm.value.response.status_code == 406 or
+ cm.value.response.status_code == 503
+ )
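+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# A second node would join with the tokens the manager publishes; a sketch,
+# assuming the JoinTokens field the Swarm API documents for inspect:
+def worker_join_token(client):
+ client.swarm.reload()
+ return client.swarm.attrs['JoinTokens']['Worker']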
diff --git a/tests/integration/models_volumes_test.py b/tests/integration/models_volumes_test.py
new file mode 100644
index 0000000..47b4a45
--- /dev/null
+++ b/tests/integration/models_volumes_test.py
@@ -0,0 +1,30 @@
+import docker
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class VolumesTest(BaseIntegrationTest):
+ def test_create_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create(
+ 'dockerpytest_1',
+ driver='local',
+ labels={'labelkey': 'labelvalue'}
+ )
+ self.tmp_volumes.append(volume.id)
+ assert volume.id
+ assert volume.name == 'dockerpytest_1'
+ assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'}
+
+ volume = client.volumes.get(volume.id)
+ assert volume.name == 'dockerpytest_1'
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create('dockerpytest_1')
+ self.tmp_volumes.append(volume.id)
+ assert volume in client.volumes.list()
+ assert volume in client.volumes.list(filters={'name': 'dockerpytest_'})
+ assert volume not in client.volumes.list(filters={'name': 'foobar'})
+
+ volume.remove()
+ assert volume not in client.volumes.list()
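+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# Cleanup here is explicit via tmp_volumes; the SDK also exposes prune(). A
+# sketch, assuming the VolumesDeleted/SpaceReclaimed payload documented for
+# the volume-prune endpoint:
+def prune_unused_volumes(client):
+ result = client.volumes.prune()
+ return result.get('VolumesDeleted'), result.get('SpaceReclaimed')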
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
new file mode 100644
index 0000000..0fd4e43
--- /dev/null
+++ b/tests/integration/regression_test.py
@@ -0,0 +1,65 @@
+import io
+import random
+
+import docker
+import six
+
+from .base import BaseAPIIntegrationTest, BUSYBOX
+import pytest
+
+
+class TestRegressions(BaseAPIIntegrationTest):
+ def test_443_handle_nonchunked_response_in_stream(self):
+ dfile = io.BytesIO()
+ with pytest.raises(docker.errors.APIError) as exc:
+ for line in self.client.build(fileobj=dfile, tag="a/b/c"):
+ pass
+ assert exc.value.response.status_code == 500
+ dfile.close()
+
+ def test_542_truncate_ids_client_side(self):
+ self.client.start(
+ self.client.create_container(BUSYBOX, ['true'])
+ )
+ result = self.client.containers(all=True, trunc=True)
+ assert len(result[0]['Id']) == 12
+
+ def test_647_support_doubleslash_in_image_names(self):
+ with pytest.raises(docker.errors.APIError):
+ self.client.inspect_image('gensokyo.jp//kirisame')
+
+ def test_649_handle_timeout_value_none(self):
+ self.client.timeout = None
+ ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
+ self.client.start(ctnr)
+ self.client.stop(ctnr)
+
+ def test_715_handle_user_param_as_int_value(self):
+ ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
+ self.client.start(ctnr)
+ self.client.wait(ctnr)
+ logs = self.client.logs(ctnr)
+ if six.PY3:
+ logs = logs.decode('utf-8')
+ assert logs == '1000\n'
+
+ def test_792_explicit_port_protocol(self):
+ tcp_port, udp_port = random.sample(range(9999, 32000), 2)
+ ctnr = self.client.create_container(
+ BUSYBOX, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
+ host_config=self.client.create_host_config(
+ port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
+ )
+ )
+ self.tmp_containers.append(ctnr)
+ self.client.start(ctnr)
+ assert self.client.port(
+ ctnr, 2000
+ )[0]['HostPort'] == six.text_type(tcp_port)
+ assert self.client.port(
+ ctnr, '2000/tcp'
+ )[0]['HostPort'] == six.text_type(tcp_port)
+ assert self.client.port(
+ ctnr, '2000/udp'
+ )[0]['HostPort'] == six.text_type(udp_port)
diff --git a/tests/integration/testdata/dummy-plugin/config.json b/tests/integration/testdata/dummy-plugin/config.json
new file mode 100644
index 0000000..53b4e7a
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/config.json
@@ -0,0 +1,19 @@
+{
+ "description": "Dummy test plugin for docker python SDK",
+ "documentation": "https://github.com/docker/docker-py",
+ "entrypoint": ["/dummy"],
+ "network": {
+ "type": "host"
+ },
+ "interface" : {
+ "types": ["docker.volumedriver/1.0"],
+ "socket": "dummy.sock"
+ },
+ "env": [
+ {
+ "name":"DEBUG",
+ "settable":["value"],
+ "value":"0"
+ }
+ ]
+}
diff --git a/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/unit/__init__.py
diff --git a/tests/unit/api_build_test.py b/tests/unit/api_build_test.py
new file mode 100644
index 0000000..a7f34fd
--- /dev/null
+++ b/tests/unit/api_build_test.py
@@ -0,0 +1,163 @@
+import gzip
+import io
+
+import docker
+from docker import auth
+
+from .api_test import BaseAPIClientTest, fake_request, url_prefix
+import pytest
+
+
+class BuildTest(BaseAPIClientTest):
+ def test_build_container(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+
+ self.client.build(fileobj=script)
+
+ def test_build_container_pull(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+
+ self.client.build(fileobj=script, pull=True)
+
+ def test_build_container_custom_context(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+ context = docker.utils.mkbuildcontext(script)
+
+ self.client.build(fileobj=context, custom_context=True)
+
+ def test_build_container_custom_context_gzip(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN mkdir -p /tmp/test',
+ 'EXPOSE 8080',
+ 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
+ ' /tmp/silence.tar.gz'
+ ]).encode('ascii'))
+ context = docker.utils.mkbuildcontext(script)
+ gz_context = gzip.GzipFile(fileobj=context)
+
+ self.client.build(
+ fileobj=gz_context,
+ custom_context=True,
+ encoding="gzip"
+ )
+
+ def test_build_remote_with_registry_auth(self):
+ self.client._auth_configs = {
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
+ }
+ }
+
+ expected_params = {'t': None, 'q': False, 'dockerfile': None,
+ 'rm': False, 'nocache': False, 'pull': False,
+ 'forcerm': False,
+ 'remote': 'https://github.com/docker-library/mongo'}
+ expected_headers = {
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ )
+ }
+
+ self.client.build(path='https://github.com/docker-library/mongo')
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'build',
+ stream=True,
+ data=None,
+ headers=expected_headers,
+ params=expected_params,
+ timeout=None
+ )
+
+ def test_build_container_with_named_dockerfile(self):
+ self.client.build('.', dockerfile='nameddockerfile')
+
+ def test_build_container_with_container_limits(self):
+ self.client.build('.', container_limits={
+ 'memory': 1024 * 1024,
+ 'cpusetcpus': 1,
+ 'cpushares': 1000,
+ 'memswap': 1024 * 1024 * 8
+ })
+
+ def test_build_container_invalid_container_limits(self):
+ with pytest.raises(docker.errors.DockerException):
+ self.client.build('.', container_limits={
+ 'foo': 'bar'
+ })
+
+ def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
+ self.client._auth_configs = {
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
+ }
+ }
+
+ headers = {}
+ expected_headers = {
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ )
+ }
+
+ self.client._set_auth_headers(headers)
+ assert headers == expected_headers
+
+ def test_set_auth_headers_with_dict_and_auth_configs(self):
+ self.client._auth_configs = {
+ 'auths': {
+ 'https://example.com': {
+ 'user': 'example',
+ 'password': 'example',
+ 'email': 'example@example.com'
+ }
+ }
+ }
+
+ headers = {'foo': 'bar'}
+ expected_headers = {
+ 'X-Registry-Config': auth.encode_header(
+ self.client._auth_configs['auths']
+ ),
+ 'foo': 'bar'
+ }
+
+ self.client._set_auth_headers(headers)
+ assert headers == expected_headers
+
+ def test_set_auth_headers_with_dict_and_no_auth_configs(self):
+ headers = {'foo': 'bar'}
+ expected_headers = {
+ 'foo': 'bar'
+ }
+
+ self.client._set_auth_headers(headers)
+ assert headers == expected_headers
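+
+
+# --- Editor's sketch (not part of the upstream suite) ----------------------
+# For readers of the X-Registry-Config assertions above: a decoding sketch,
+# assuming encode_header() is URL-safe base64 over a JSON document:
+def decode_registry_config(header_value):
+ import base64
+ import json
+ return json.loads(base64.urlsafe_b64decode(header_value).decode('ascii'))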
diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py
new file mode 100644
index 0000000..a7e183c
--- /dev/null
+++ b/tests/unit/api_container_test.py
@@ -0,0 +1,1448 @@
+# -*- coding: utf-8 -*-
+
+import datetime
+import json
+import signal
+
+import docker
+import pytest
+import six
+
+from . import fake_api
+from ..helpers import requires_api_version
+from .api_test import (
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+ fake_inspect_container
+)
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+def fake_inspect_container_tty(self, container):
+ return fake_inspect_container(self, container, tty=True)
+
+
+class StartContainerTest(BaseAPIClientTest):
+ def test_start_container(self):
+ self.client.start(fake_api.FAKE_CONTAINER_ID)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/start'
+ assert 'data' not in args[1]
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_start_container_none(self):
+ with pytest.raises(ValueError) as excinfo:
+ self.client.start(container=None)
+
+ assert str(excinfo.value) == 'Resource ID was not provided'
+
+ with pytest.raises(ValueError) as excinfo:
+ self.client.start(None)
+
+ assert str(excinfo.value) == 'Resource ID was not provided'
+
+ def test_start_container_regression_573(self):
+ self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
+
+ def test_start_container_with_lxc_conf(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(
+ fake_api.FAKE_CONTAINER_ID,
+ lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
+ )
+
+ def test_start_container_with_lxc_conf_compat(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(
+ fake_api.FAKE_CONTAINER_ID,
+ lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
+ )
+
+ def test_start_container_with_binds_ro(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(
+ fake_api.FAKE_CONTAINER_ID, binds={
+ '/tmp': {
+ "bind": '/mnt',
+ "ro": True
+ }
+ }
+ )
+
+ def test_start_container_with_binds_rw(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(
+ fake_api.FAKE_CONTAINER_ID, binds={
+ '/tmp': {"bind": '/mnt', "ro": False}
+ }
+ )
+
+ def test_start_container_with_port_binds(self):
+ self.maxDiff = None
+
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
+ 1111: None,
+ 2222: 2222,
+ '3333/udp': (3333,),
+ 4444: ('127.0.0.1',),
+ 5555: ('127.0.0.1', 5555),
+ 6666: [('127.0.0.1',), ('192.168.0.1',)]
+ })
+
+ def test_start_container_with_links(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(
+ fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
+ )
+
+ def test_start_container_with_multiple_links(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(
+ fake_api.FAKE_CONTAINER_ID,
+ links={
+ 'path1': 'alias1',
+ 'path2': 'alias2'
+ }
+ )
+
+ def test_start_container_with_links_as_list_of_tuples(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(fake_api.FAKE_CONTAINER_ID,
+ links=[('path', 'alias')])
+
+ def test_start_container_privileged(self):
+ with pytest.raises(docker.errors.DeprecatedMethod):
+ self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
+
+ def test_start_container_with_dict_instead_of_id(self):
+ self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/start'
+ assert 'data' not in args[1]
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+
+class CreateContainerTest(BaseAPIClientTest):
+ def test_create_container(self):
+ self.client.create_container('busybox', 'true')
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": false,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": false,
+ "OpenStdin": false, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_binds(self):
+ mount_dest = '/mnt'
+
+ self.client.create_container('busybox', ['ls', mount_dest],
+ volumes=[mount_dest])
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls", "/mnt"], "AttachStdin": false,
+ "Volumes": {"/mnt": {}},
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_volume_string(self):
+ mount_dest = '/mnt'
+
+ self.client.create_container('busybox', ['ls', mount_dest],
+ volumes=mount_dest)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls", "/mnt"], "AttachStdin": false,
+ "Volumes": {"/mnt": {}},
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_ports(self):
+ self.client.create_container('busybox', 'ls',
+ ports=[1111, (2222, 'udp'), (3333,)])
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "ExposedPorts": {
+ "1111/tcp": {},
+ "2222/udp": {},
+ "3333/tcp": {}
+ },
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_entrypoint(self):
+ self.client.create_container('busybox', 'hello',
+ entrypoint='cowsay entry')
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["hello"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "Entrypoint": ["cowsay", "entry"]}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_host_config_cpu_shares(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpu_shares=512
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuShares": 512,
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_host_config_cpuset(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpuset_cpus='0,1'
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetCpus": "0,1",
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_host_config_cpuset_mems(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpuset_mems='0'
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetMems": "0",
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_cgroup_parent(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cgroup_parent='test'
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ data = json.loads(args[1]['data'])
+ assert 'HostConfig' in data
+ assert 'CgroupParent' in data['HostConfig']
+ assert data['HostConfig']['CgroupParent'] == 'test'
+
+ def test_create_container_with_working_dir(self):
+ self.client.create_container('busybox', 'ls',
+ working_dir='/root')
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "WorkingDir": "/root"}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_stdin_open(self):
+ self.client.create_container('busybox', 'true', stdin_open=True)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": true,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": true,
+ "OpenStdin": true, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_named_container(self):
+ self.client.create_container('busybox', 'true',
+ name='marisa-kirisame')
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox", "Cmd": ["true"],
+ "AttachStdin": false,
+ "AttachStderr": true, "AttachStdout": true,
+ "StdinOnce": false,
+ "OpenStdin": false, "NetworkDisabled": false}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['params'] == {'name': 'marisa-kirisame'}
+
+ def test_create_container_with_mem_limit_as_int(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ mem_limit=128.0
+ )
+ )
+
+ args = fake_request.call_args
+ data = json.loads(args[1]['data'])
+ assert data['HostConfig']['Memory'] == 128.0
+
+ def test_create_container_with_mem_limit_as_string(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ mem_limit='128'
+ )
+ )
+
+ args = fake_request.call_args
+ data = json.loads(args[1]['data'])
+ assert data['HostConfig']['Memory'] == 128.0
+
+ def test_create_container_with_mem_limit_as_string_with_k_unit(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ mem_limit='128k'
+ )
+ )
+
+ args = fake_request.call_args
+ data = json.loads(args[1]['data'])
+ assert data['HostConfig']['Memory'] == 128.0 * 1024
+
+ def test_create_container_with_mem_limit_as_string_with_m_unit(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ mem_limit='128m'
+ )
+ )
+
+ args = fake_request.call_args
+ data = json.loads(args[1]['data'])
+ assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024
+
+ def test_create_container_with_mem_limit_as_string_with_g_unit(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ mem_limit='128g'
+ )
+ )
+
+ args = fake_request.call_args
+ data = json.loads(args[1]['data'])
+ assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024 * 1024
+
+ def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
+ with pytest.raises(docker.errors.DockerException):
+ self.client.create_host_config(mem_limit='128p')
+
+ with pytest.raises(docker.errors.DockerException):
+ self.client.create_host_config(mem_limit='1f28')
+
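+ # --- Editor's sketch (not part of the upstream suite) ------------------
+ # The k/m/g handling above is byte-size parsing (docker.utils.parse_bytes
+ # is assumed to be the helper behind it); the expected conversions:
+ def sketch_parse_bytes(self):
+ from docker.utils import parse_bytes
+ assert parse_bytes('128k') == 128 * 1024
+ assert parse_bytes('128m') == 128 * 1024 * 1024
+ assert parse_bytes('128g') == 128 * 1024 * 1024 * 1024
+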
+ def test_create_container_with_lxc_conf(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['LxcConf'] = [
+ {"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
+ ]
+
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_lxc_conf_compat(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['LxcConf'] = [
+ {"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
+ ]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_binds_ro(self):
+ mount_dest = '/mnt'
+ mount_origin = '/tmp'
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ binds={mount_origin: {
+ "bind": mount_dest,
+ "ro": True
+ }}
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_binds_rw(self):
+ mount_dest = '/mnt'
+ mount_origin = '/tmp'
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ binds={mount_origin: {
+ "bind": mount_dest,
+ "ro": False
+ }}
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_binds_mode(self):
+ mount_dest = '/mnt'
+ mount_origin = '/tmp'
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ binds={mount_origin: {
+ "bind": mount_dest,
+ "mode": "z",
+ }}
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_binds_mode_and_ro_error(self):
+ with pytest.raises(ValueError):
+ mount_dest = '/mnt'
+ mount_origin = '/tmp'
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ binds={mount_origin: {
+ "bind": mount_dest,
+ "mode": "z",
+ "ro": True,
+ }}
+ )
+ )
+
+ def test_create_container_with_binds_list(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ binds=[
+ "/tmp:/mnt/1:ro",
+ "/tmp:/mnt/2",
+ ],
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Binds'] = [
+ "/tmp:/mnt/1:ro",
+ "/tmp:/mnt/2",
+ ]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_port_binds(self):
+ self.maxDiff = None
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ port_bindings={
+ 1111: None,
+ 2222: 2222,
+ '3333/udp': (3333,),
+ 4444: ('127.0.0.1',),
+ 5555: ('127.0.0.1', 5555),
+ 6666: [('127.0.0.1',), ('192.168.0.1',)]
+ }
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ data = json.loads(args[1]['data'])
+ port_bindings = data['HostConfig']['PortBindings']
+ assert '1111/tcp' in port_bindings
+ assert '2222/tcp' in port_bindings
+ assert '3333/udp' in port_bindings
+ assert '4444/tcp' in port_bindings
+ assert '5555/tcp' in port_bindings
+ assert '6666/tcp' in port_bindings
+ assert [{"HostPort": "", "HostIp": ""}] == port_bindings['1111/tcp']
+ assert [
+ {"HostPort": "2222", "HostIp": ""}
+ ] == port_bindings['2222/tcp']
+ assert [
+ {"HostPort": "3333", "HostIp": ""}
+ ] == port_bindings['3333/udp']
+ assert [
+ {"HostPort": "", "HostIp": "127.0.0.1"}
+ ] == port_bindings['4444/tcp']
+ assert [
+ {"HostPort": "5555", "HostIp": "127.0.0.1"}
+ ] == port_bindings['5555/tcp']
+ assert len(port_bindings['6666/tcp']) == 2
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
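+ # --- Editor's sketch (not part of the upstream suite) ------------------
+ # The accepted port_bindings value shapes, annotated with the
+ # normalization asserted above:
+ @staticmethod
+ def sketch_port_binding_shapes(client):
+ return client.create_host_config(port_bindings={
+ 1111: None,  # -> [{"HostIp": "", "HostPort": ""}]
+ 2222: 2222,  # -> [{"HostIp": "", "HostPort": "2222"}]
+ '3333/udp': (3333,),  # protocol kept, host port "3333"
+ 4444: ('127.0.0.1',),  # bound IP, ephemeral host port
+ 5555: ('127.0.0.1', 5555),  # bound IP and fixed host port
+ 6666: [('127.0.0.1',), ('192.168.0.1',)],  # one entry per tuple
+ })
+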
+ def test_create_container_with_mac_address(self):
+ expected = "02:42:ac:11:00:0a"
+
+ self.client.create_container(
+ 'busybox',
+ ['sleep', '60'],
+ mac_address=expected
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ data = json.loads(args[1]['data'])
+ assert data['MacAddress'] == expected
+
+ def test_create_container_with_links(self):
+ link_path = 'path'
+ alias = 'alias'
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ links={link_path: alias}
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Links'] = ['path:alias']
+
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_multiple_links(self):
+ link_path = 'path'
+ alias = 'alias'
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ links={
+ link_path + '1': alias + '1',
+ link_path + '2': alias + '2'
+ }
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Links'] = [
+ 'path1:alias1', 'path2:alias2'
+ ]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_with_links_as_list_of_tuples(self):
+ link_path = 'path'
+ alias = 'alias'
+
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ links=[(link_path, alias)]
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Links'] = ['path:alias']
+
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_create_container_privileged(self):
+ self.client.create_container(
+ 'busybox', 'true',
+ host_config=self.client.create_host_config(privileged=True)
+ )
+
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Privileged'] = True
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_restart_policy(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ restart_policy={
+ "Name": "always",
+ "MaximumRetryCount": 0
+ }
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['RestartPolicy'] = {
+ "MaximumRetryCount": 0, "Name": "always"
+ }
+ assert json.loads(args[1]['data']) == expected_payload
+
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_added_capabilities(self):
+ self.client.create_container(
+ 'busybox', 'true',
+ host_config=self.client.create_host_config(cap_add=['MKNOD'])
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_dropped_capabilities(self):
+ self.client.create_container(
+ 'busybox', 'true',
+ host_config=self.client.create_host_config(cap_drop=['MKNOD'])
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_devices(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ devices=['/dev/sda:/dev/xvda:rwm',
+ '/dev/sdb:/dev/xvdb',
+ '/dev/sdc']
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Devices'] = [
+ {'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/xvda',
+ 'PathOnHost': '/dev/sda'},
+ {'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/xvdb',
+ 'PathOnHost': '/dev/sdb'},
+ {'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/sdc',
+ 'PathOnHost': '/dev/sdc'}
+ ]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_labels_dict(self):
+ labels_dict = {
+ six.text_type('foo'): six.text_type('1'),
+ six.text_type('bar'): six.text_type('2'),
+ }
+
+ self.client.create_container(
+ 'busybox', 'true',
+ labels=labels_dict,
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Labels'] == labels_dict
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_labels_list(self):
+ labels_list = [
+ six.text_type('foo'),
+ six.text_type('bar'),
+ ]
+ labels_dict = {
+ six.text_type('foo'): six.text_type(),
+ six.text_type('bar'): six.text_type(),
+ }
+
+ self.client.create_container(
+ 'busybox', 'true',
+ labels=labels_list,
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Labels'] == labels_dict
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_named_volume(self):
+ mount_dest = '/mnt'
+ volume_name = 'name'
+
+ self.client.create_container(
+ 'busybox', 'true',
+ host_config=self.client.create_host_config(
+ volume_driver='foodriver',
+ binds={volume_name: {
+ "bind": mount_dest,
+ "ro": False
+ }}),
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['VolumeDriver'] = 'foodriver'
+ expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_stop_signal(self):
+ self.client.create_container('busybox', 'ls',
+ stop_signal='SIGINT')
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "StopSignal": "SIGINT"}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ @requires_api_version('1.22')
+ def test_create_container_with_aliases(self):
+ self.client.create_container(
+ 'busybox', 'ls',
+ host_config=self.client.create_host_config(
+ network_mode='some-network',
+ ),
+ networking_config=self.client.create_networking_config({
+ 'some-network': self.client.create_endpoint_config(
+ aliases=['foo', 'bar'],
+ ),
+ }),
+ )
+
+ args = fake_request.call_args
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "NetworkMode": "some-network"
+ },
+ "NetworkingConfig": {
+ "EndpointsConfig": {
+ "some-network": {"Aliases": ["foo", "bar"]}
+ }
+ }}
+ ''')
+
+ @requires_api_version('1.22')
+ def test_create_container_with_tmpfs_list(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ tmpfs=[
+ "/tmp",
+ "/mnt:size=3G,uid=100"
+ ]
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Tmpfs'] = {
+ "/tmp": "",
+ "/mnt": "size=3G,uid=100"
+ }
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ @requires_api_version('1.22')
+ def test_create_container_with_tmpfs_dict(self):
+ self.client.create_container(
+ 'busybox', 'true', host_config=self.client.create_host_config(
+ tmpfs={
+ "/tmp": "",
+ "/mnt": "size=3G,uid=100"
+ }
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Tmpfs'] = {
+ "/tmp": "",
+ "/mnt": "size=3G,uid=100"
+ }
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ @requires_api_version('1.24')
+ def test_create_container_with_sysctl(self):
+ self.client.create_container(
+ 'busybox', 'true',
+ host_config=self.client.create_host_config(
+ sysctls={
+ 'net.core.somaxconn': 1024,
+ 'net.ipv4.tcp_syncookies': '0',
+ }
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ expected_payload = self.base_create_payload()
+ expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['Sysctls'] = {
+ 'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
+ }
+ assert json.loads(args[1]['data']) == expected_payload
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS
+
+ def test_create_container_with_unicode_envvars(self):
+ envvars_dict = {
+ 'foo': u'☃',
+ }
+
+ expected = [
+ u'foo=☃'
+ ]
+
+ self.client.create_container(
+ 'busybox', 'true',
+ environment=envvars_dict,
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+ assert json.loads(args[1]['data'])['Env'] == expected
+
+ @requires_api_version('1.25')
+ def test_create_container_with_host_config_cpus(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpu_count=1,
+ cpu_percent=20,
+ nano_cpus=1000
+ )
+ )
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/create'
+
+ assert json.loads(args[1]['data']) == json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuCount": 1,
+ "CpuPercent": 20,
+ "NanoCpus": 1000,
+ "NetworkMode": "default"
+ }}
+ ''')
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+
+class ContainerTest(BaseAPIClientTest):
+ def test_list_containers(self):
+ self.client.containers(all=True)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/json',
+ params={
+ 'all': 1,
+ 'since': None,
+ 'size': 0,
+ 'limit': -1,
+ 'trunc_cmd': 0,
+ 'before': None
+ },
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_resize_container(self):
+ self.client.resize(
+ {'Id': fake_api.FAKE_CONTAINER_ID},
+ height=15,
+ width=120
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/resize',
+ params={'h': 15, 'w': 120},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_rename_container(self):
+ self.client.rename(
+ {'Id': fake_api.FAKE_CONTAINER_ID},
+ name='foobar'
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/rename',
+ params={'name': 'foobar'},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_wait(self):
+ self.client.wait(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/wait',
+ timeout=None,
+ params={}
+ )
+
+ def test_wait_with_dict_instead_of_id(self):
+ self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/wait',
+ timeout=None,
+ params={}
+ )
+
+ def test_logs(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=False
+ )
+
+ assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
+
+ def test_logs_with_dict_instead_of_id(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=False
+ )
+
+ assert logs == 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
+
+ def test_log_streaming(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
+ follow=False)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=True
+ )
+
+ def test_log_following(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=True)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=False
+ )
+
+ def test_log_following_backwards(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
+
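+ # with stream=True and no explicit follow argument, follow
+ # defaults to 1 (the legacy behaviour the test name alludes to)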
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=True
+ )
+
+ def test_log_streaming_and_following(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
+ follow=True)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=True
+ )
+
+ def test_log_tail(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=False, tail=10)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
+ 'tail': 10},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=False
+ )
+
+ def test_log_since(self):
+ ts = 809222400
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=False, since=ts)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all', 'since': ts},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=False
+ )
+
+ def test_log_since_with_datetime(self):
+ ts = 809222400
+ time = datetime.datetime.utcfromtimestamp(ts)
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=False, since=time)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all', 'since': ts},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=False
+ )
+
+ def test_log_since_with_invalid_value_raises_error(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ with pytest.raises(docker.errors.InvalidArgument):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=False, since=42.42)
+
+ def test_log_tty(self):
+ m = mock.Mock()
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container_tty):
+ with mock.patch('docker.api.client.APIClient._stream_raw_result',
+ m):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID,
+ follow=True, stream=True)
+
+ assert m.called
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/logs',
+ params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
+ 'tail': 'all'},
+ timeout=DEFAULT_TIMEOUT_SECONDS,
+ stream=True
+ )
+
+ def test_diff(self):
+ self.client.diff(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/changes',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_diff_with_dict_instead_of_id(self):
+ self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/changes',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_port(self):
+ self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/json',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_stop_container(self):
+ timeout = 2
+
+ self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
+
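+ # the HTTP read timeout is extended by the stop grace period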
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/stop',
+ params={'t': timeout},
+ timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
+ )
+
+ def test_stop_container_with_dict_instead_of_id(self):
+ timeout = 2
+
+ self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
+ timeout=timeout)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/stop',
+ params={'t': timeout},
+ timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
+ )
+
+ def test_pause_container(self):
+ self.client.pause(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/pause',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_unpause_container(self):
+ self.client.unpause(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/unpause',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_kill_container(self):
+ self.client.kill(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/kill',
+ params={},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_kill_container_with_dict_instead_of_id(self):
+ self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/kill',
+ params={},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_kill_container_with_signal(self):
+ self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/kill',
+ params={'signal': signal.SIGTERM},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_restart_container(self):
+ self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/restart',
+ params={'t': 2},
+ timeout=(DEFAULT_TIMEOUT_SECONDS + 2)
+ )
+
+ def test_restart_container_with_dict_instead_of_id(self):
+ self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'containers/3cc2351ab11b/restart',
+ params={'t': 2},
+ timeout=(DEFAULT_TIMEOUT_SECONDS + 2)
+ )
+
+ def test_remove_container(self):
+ self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'DELETE',
+ url_prefix + 'containers/3cc2351ab11b',
+ params={'v': False, 'link': False, 'force': False},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_remove_container_with_dict_instead_of_id(self):
+ self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ fake_request.assert_called_with(
+ 'DELETE',
+ url_prefix + 'containers/3cc2351ab11b',
+ params={'v': False, 'link': False, 'force': False},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_export(self):
+ self.client.export(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/export',
+ stream=True,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_export_with_dict_instead_of_id(self):
+ self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/export',
+ stream=True,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_inspect_container(self):
+ self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/json',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_inspect_container_undefined_id(self):
+ for arg in None, '', {True: True}:
+ with pytest.raises(docker.errors.NullResource) as excinfo:
+ self.client.inspect_container(arg)
+
+ assert excinfo.value.args[0] == 'Resource ID was not provided'
+
+ def test_container_stats(self):
+ self.client.stats(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/stats',
+ timeout=60,
+ stream=True
+ )
+
+ def test_container_top(self):
+ self.client.top(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/top',
+ params={},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_container_top_with_psargs(self):
+ self.client.top(fake_api.FAKE_CONTAINER_ID, 'waux')
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'containers/3cc2351ab11b/top',
+ params={'ps_args': 'waux'},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ @requires_api_version('1.22')
+ def test_container_update(self):
+ self.client.update_container(
+ fake_api.FAKE_CONTAINER_ID, mem_limit='2k', cpu_shares=124,
+ blkio_weight=345
+ )
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'containers/3cc2351ab11b/update'
+ assert json.loads(args[1]['data']) == {
+ 'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345
+ }
+ assert args[1]['headers']['Content-Type'] == 'application/json'
diff --git a/tests/unit/api_exec_test.py b/tests/unit/api_exec_test.py
new file mode 100644
index 0000000..a9d2dd5
--- /dev/null
+++ b/tests/unit/api_exec_test.py
@@ -0,0 +1,83 @@
+import json
+
+from . import fake_api
+from .api_test import (
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+)
+
+
+class ExecTest(BaseAPIClientTest):
+ def test_exec_create(self):
+ self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
+
+ args = fake_request.call_args
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'containers/{0}/exec'.format(
+ fake_api.FAKE_CONTAINER_ID
+ )
+
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'AttachStdout': True,
+ 'Container': fake_api.FAKE_CONTAINER_ID,
+ 'Cmd': ['ls', '-1'],
+ 'Privileged': False,
+ 'AttachStdin': False,
+ 'AttachStderr': True,
+ 'User': ''
+ }
+
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+
+ def test_exec_start(self):
+ self.client.exec_start(fake_api.FAKE_EXEC_ID)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'Detach': False,
+ }
+
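+ # a non-detached exec start hijacks the HTTP connection, hence
+ # the Connection/Upgrade headers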
+ assert args[1]['headers'] == {
+ 'Content-Type': 'application/json',
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
+ def test_exec_start_detached(self):
+ self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+
+ assert json.loads(args[1]['data']) == {
+ 'Tty': False,
+ 'Detach': True
+ }
+
+ assert args[1]['headers'] == {
+ 'Content-Type': 'application/json'
+ }
+
+ def test_exec_inspect(self):
+ self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'exec/{0}/json'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+
+ def test_exec_resize(self):
+ self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
+ params={'h': 20, 'w': 60},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py
new file mode 100644
index 0000000..1e2315d
--- /dev/null
+++ b/tests/unit/api_image_test.py
@@ -0,0 +1,357 @@
+import docker
+import pytest
+
+from . import fake_api
+from docker import auth
+from .api_test import (
+ BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
+ fake_resolve_authconfig
+)
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class ImageTest(BaseAPIClientTest):
+ def test_image_viz(self):
+ # viz output is unsupported; the client is expected to raise
+ with pytest.raises(Exception):
+ self.client.images('busybox', viz=True)
+
+ def test_images(self):
+ self.client.images(all=True)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/json',
+ params={'filter': None, 'only_ids': 0, 'all': 1},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_images_quiet(self):
+ self.client.images(all=True, quiet=True)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/json',
+ params={'filter': None, 'only_ids': 1, 'all': 1},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_image_ids(self):
+ self.client.images(quiet=True)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/json',
+ params={'filter': None, 'only_ids': 1, 'all': 0},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_images_filters(self):
+ self.client.images(filters={'dangling': True})
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/json',
+ params={'filter': None, 'only_ids': 0, 'all': 0,
+ 'filters': '{"dangling": ["true"]}'},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_pull(self):
+ self.client.pull('joffrey/test001')
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'images/create'
+ assert args[1]['params'] == {
+ 'tag': None, 'fromImage': 'joffrey/test001'
+ }
+ assert not args[1]['stream']
+
+ def test_pull_stream(self):
+ self.client.pull('joffrey/test001', stream=True)
+
+ args = fake_request.call_args
+ assert args[0][1] == url_prefix + 'images/create'
+ assert args[1]['params'] == {
+ 'tag': None, 'fromImage': 'joffrey/test001'
+ }
+ assert args[1]['stream']
+
+ def test_commit(self):
+ self.client.commit(fake_api.FAKE_CONTAINER_ID)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'commit',
+ data='{}',
+ headers={'Content-Type': 'application/json'},
+ params={
+ 'repo': None,
+ 'comment': None,
+ 'tag': None,
+ 'container': '3cc2351ab11b',
+ 'author': None,
+ 'changes': None
+ },
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_remove_image(self):
+ self.client.remove_image(fake_api.FAKE_IMAGE_ID)
+
+ fake_request.assert_called_with(
+ 'DELETE',
+ url_prefix + 'images/e9aa60c60128',
+ params={'force': False, 'noprune': False},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_image_history(self):
+ self.client.history(fake_api.FAKE_IMAGE_NAME)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/test_image/history',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_import_image(self):
+ self.client.import_image(
+ fake_api.FAKE_TARBALL_PATH,
+ repository=fake_api.FAKE_REPO_NAME,
+ tag=fake_api.FAKE_TAG_NAME
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/create',
+ params={
+ 'repo': fake_api.FAKE_REPO_NAME,
+ 'tag': fake_api.FAKE_TAG_NAME,
+ 'fromSrc': fake_api.FAKE_TARBALL_PATH
+ },
+ data=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_import_image_from_bytes(self):
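+ # any iterable is accepted as the image source; it is forwarded
+ # verbatim as the request body with fromSrc set to '-'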
+ stream = (i for i in range(0, 100))
+
+ self.client.import_image(
+ stream,
+ repository=fake_api.FAKE_REPO_NAME,
+ tag=fake_api.FAKE_TAG_NAME
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/create',
+ params={
+ 'repo': fake_api.FAKE_REPO_NAME,
+ 'tag': fake_api.FAKE_TAG_NAME,
+ 'fromSrc': '-',
+ },
+ headers={
+ 'Content-Type': 'application/tar',
+ },
+ data=stream,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_import_image_from_image(self):
+ self.client.import_image(
+ image=fake_api.FAKE_IMAGE_NAME,
+ repository=fake_api.FAKE_REPO_NAME,
+ tag=fake_api.FAKE_TAG_NAME
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/create',
+ params={
+ 'repo': fake_api.FAKE_REPO_NAME,
+ 'tag': fake_api.FAKE_TAG_NAME,
+ 'fromImage': fake_api.FAKE_IMAGE_NAME
+ },
+ data=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_inspect_image(self):
+ self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/test_image/json',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_inspect_image_undefined_id(self):
+ for arg in None, '', {True: True}:
+ with pytest.raises(docker.errors.NullResource) as excinfo:
+ self.client.inspect_image(arg)
+
+ assert excinfo.value.args[0] == 'Resource ID was not provided'
+
+ def test_push_image(self):
+ with mock.patch('docker.auth.resolve_authconfig',
+ fake_resolve_authconfig):
+ self.client.push(fake_api.FAKE_IMAGE_NAME)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/test_image/push',
+ params={
+ 'tag': None
+ },
+ data='{}',
+ headers={'Content-Type': 'application/json'},
+ stream=False,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_push_image_with_tag(self):
+ with mock.patch('docker.auth.resolve_authconfig',
+ fake_resolve_authconfig):
+ self.client.push(
+ fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/test_image/push',
+ params={
+ 'tag': fake_api.FAKE_TAG_NAME,
+ },
+ data='{}',
+ headers={'Content-Type': 'application/json'},
+ stream=False,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_push_image_with_auth(self):
+ auth_config = {
+ 'username': "test_user",
+ 'password': "test_password",
+ 'serveraddress': "test_server",
+ }
+ encoded_auth = auth.encode_header(auth_config)
+ self.client.push(
+ fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
+ auth_config=auth_config
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/test_image/push',
+ params={
+ 'tag': fake_api.FAKE_TAG_NAME,
+ },
+ data='{}',
+ headers={'Content-Type': 'application/json',
+ 'X-Registry-Auth': encoded_auth},
+ stream=False,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_push_image_stream(self):
+ with mock.patch('docker.auth.resolve_authconfig',
+ fake_resolve_authconfig):
+ self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/test_image/push',
+ params={
+ 'tag': None
+ },
+ data='{}',
+ headers={'Content-Type': 'application/json'},
+ stream=True,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_tag_image(self):
+ self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/e9aa60c60128/tag',
+ params={
+ 'tag': None,
+ 'repo': 'repo',
+ 'force': 0
+ },
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_tag_image_tag(self):
+ self.client.tag(
+ fake_api.FAKE_IMAGE_ID,
+ fake_api.FAKE_REPO_NAME,
+ tag=fake_api.FAKE_TAG_NAME
+ )
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/e9aa60c60128/tag',
+ params={
+ 'tag': 'tag',
+ 'repo': 'repo',
+ 'force': 0
+ },
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_tag_image_force(self):
+ self.client.tag(
+ fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/e9aa60c60128/tag',
+ params={
+ 'tag': None,
+ 'repo': 'repo',
+ 'force': 1
+ },
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_get_image(self):
+ self.client.get_image(fake_api.FAKE_IMAGE_ID)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/e9aa60c60128/get',
+ stream=True,
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_load_image(self):
+ self.client.load_image('Byte Stream....')
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/load',
+ data='Byte Stream....',
+ stream=True,
+ params={},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_load_image_quiet(self):
+ self.client.load_image('Byte Stream....', quiet=True)
+
+ fake_request.assert_called_with(
+ 'POST',
+ url_prefix + 'images/load',
+ data='Byte Stream....',
+ stream=True,
+ params={'quiet': True},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py
new file mode 100644
index 0000000..c78554d
--- /dev/null
+++ b/tests/unit/api_network_test.py
@@ -0,0 +1,169 @@
+import json
+
+import six
+
+from .api_test import BaseAPIClientTest, url_prefix, response
+from docker.types import IPAMConfig, IPAMPool
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class NetworkTest(BaseAPIClientTest):
+ def test_list_networks(self):
+ networks = [
+ {
+ "name": "none",
+ "id": "8e4e55c6863ef424",
+ "type": "null",
+ "endpoints": []
+ },
+ {
+ "name": "host",
+ "id": "062b6d9ea7913fde",
+ "type": "host",
+ "endpoints": []
+ },
+ ]
+
+ get = mock.Mock(return_value=response(
+ status_code=200, content=json.dumps(networks).encode('utf-8')))
+
+ with mock.patch('docker.api.client.APIClient.get', get):
+ assert self.client.networks() == networks
+
+ assert get.call_args[0][0] == url_prefix + 'networks'
+
+ filters = json.loads(get.call_args[1]['params']['filters'])
+ assert not filters
+
+ self.client.networks(names=['foo'])
+ filters = json.loads(get.call_args[1]['params']['filters'])
+ assert filters == {'name': ['foo']}
+
+ self.client.networks(ids=['123'])
+ filters = json.loads(get.call_args[1]['params']['filters'])
+ assert filters == {'id': ['123']}
+
+ def test_create_network(self):
+ network_data = {
+ "id": 'abc12345',
+ "warning": "",
+ }
+
+ network_response = response(status_code=200, content=network_data)
+ post = mock.Mock(return_value=network_response)
+
+ with mock.patch('docker.api.client.APIClient.post', post):
+ result = self.client.create_network('foo')
+ assert result == network_data
+
+ assert post.call_args[0][0] == url_prefix + 'networks/create'
+
+ assert json.loads(post.call_args[1]['data']) == {"Name": "foo"}
+
+ opts = {
+ 'com.docker.network.bridge.enable_icc': False,
+ 'com.docker.network.bridge.enable_ip_masquerade': False,
+ }
+ self.client.create_network('foo', 'bridge', opts)
+
+ assert json.loads(post.call_args[1]['data']) == {
+ "Name": "foo", "Driver": "bridge", "Options": opts
+ }
+
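+ # IPAMPool/IPAMConfig helpers serialize into the nested IPAM
+ # dictionary expected by the networks/create endpoint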
+ ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
+ gateway="192.168.52.254")
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool_config])
+
+ self.client.create_network("bar", driver="bridge",
+ ipam=ipam_config)
+
+ assert json.loads(post.call_args[1]['data']) == {
+ "Name": "bar",
+ "Driver": "bridge",
+ "IPAM": {
+ "Driver": "default",
+ "Config": [{
+ "IPRange": None,
+ "Gateway": "192.168.52.254",
+ "Subnet": "192.168.52.0/24",
+ "AuxiliaryAddresses": None,
+ }],
+ }
+ }
+
+ def test_remove_network(self):
+ network_id = 'abc12345'
+ delete = mock.Mock(return_value=response(status_code=200))
+
+ with mock.patch('docker.api.client.APIClient.delete', delete):
+ self.client.remove_network(network_id)
+
+ args = delete.call_args
+ assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+
+ def test_inspect_network(self):
+ network_id = 'abc12345'
+ network_name = 'foo'
+ network_data = {
+ six.u('name'): network_name,
+ six.u('id'): network_id,
+ six.u('driver'): 'bridge',
+ six.u('containers'): {},
+ }
+
+ network_response = response(status_code=200, content=network_data)
+ get = mock.Mock(return_value=network_response)
+
+ with mock.patch('docker.api.client.APIClient.get', get):
+ result = self.client.inspect_network(network_id)
+ assert result == network_data
+
+ args = get.call_args
+ assert args[0][0] == url_prefix + 'networks/{0}'.format(network_id)
+
+ def test_connect_container_to_network(self):
+ network_id = 'abc12345'
+ container_id = 'def45678'
+
+ post = mock.Mock(return_value=response(status_code=201))
+
+ with mock.patch('docker.api.client.APIClient.post', post):
+ self.client.connect_container_to_network(
+ container={'Id': container_id},
+ net_id=network_id,
+ aliases=['foo', 'bar'],
+ links=[('baz', 'quux')]
+ )
+
+ assert post.call_args[0][0] == (
+ url_prefix + 'networks/{0}/connect'.format(network_id)
+ )
+
+ assert json.loads(post.call_args[1]['data']) == {
+ 'Container': container_id,
+ 'EndpointConfig': {
+ 'Aliases': ['foo', 'bar'],
+ 'Links': ['baz:quux'],
+ },
+ }
+
+ def test_disconnect_container_from_network(self):
+ network_id = 'abc12345'
+ container_id = 'def45678'
+
+ post = mock.Mock(return_value=response(status_code=201))
+
+ with mock.patch('docker.api.client.APIClient.post', post):
+ self.client.disconnect_container_from_network(
+ container={'Id': container_id}, net_id=network_id)
+
+ assert post.call_args[0][0] == (
+ url_prefix + 'networks/{0}/disconnect'.format(network_id)
+ )
+ assert json.loads(post.call_args[1]['data']) == {
+ 'Container': container_id
+ }
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
new file mode 100644
index 0000000..af2bb1c
--- /dev/null
+++ b/tests/unit/api_test.py
@@ -0,0 +1,593 @@
+import datetime
+import json
+import io
+import os
+import re
+import shutil
+import socket
+import tempfile
+import threading
+import time
+import unittest
+
+import docker
+from docker.api import APIClient
+import requests
+from requests.packages import urllib3
+import six
+
+from . import fake_api
+
+import pytest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
+
+
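+# Build a real requests.Response entirely in memory so that no
+# network I/O is involved in the unit tests.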
+def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
+ request=None, raw=None):
+ res = requests.Response()
+ res.status_code = status_code
+ if not isinstance(content, six.binary_type):
+ content = json.dumps(content).encode('ascii')
+ res._content = content
+ res.headers = requests.structures.CaseInsensitiveDict(headers or {})
+ res.reason = reason
+ res.elapsed = datetime.timedelta(elapsed)
+ res.request = request
+ res.raw = raw
+ return res
+
+
+def fake_resolve_authconfig(authconfig, registry=None, *args, **kwargs):
+ return None
+
+
+def fake_inspect_container(self, container, tty=False):
+ return fake_api.get_fake_inspect_container(tty=tty)[1]
+
+
+def fake_resp(method, url, *args, **kwargs):
+ key = None
+ if url in fake_api.fake_responses:
+ key = url
+ elif (url, method) in fake_api.fake_responses:
+ key = (url, method)
+ if not key:
+ raise Exception('{0} {1}'.format(method, url))
+ status_code, content = fake_api.fake_responses[key]()
+ return response(status_code=status_code, content=content)
+
+
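+# A single shared mock records every request made through the fake
+# verb methods below; tests inspect fake_request.call_args.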
+fake_request = mock.Mock(side_effect=fake_resp)
+
+
+def fake_get(self, url, *args, **kwargs):
+ return fake_request('GET', url, *args, **kwargs)
+
+
+def fake_post(self, url, *args, **kwargs):
+ return fake_request('POST', url, *args, **kwargs)
+
+
+def fake_put(self, url, *args, **kwargs):
+ return fake_request('PUT', url, *args, **kwargs)
+
+
+def fake_delete(self, url, *args, **kwargs):
+ return fake_request('DELETE', url, *args, **kwargs)
+
+
+def fake_read_from_socket(self, response, stream, tty=False):
+ return six.binary_type()
+
+
+url_base = '{0}/'.format(fake_api.prefix)
+url_prefix = '{0}v{1}/'.format(
+ url_base,
+ docker.constants.DEFAULT_DOCKER_API_VERSION)
+
+
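+# Patches APIClient's HTTP verb methods with the fakes above so each
+# test exercises URL construction and payload serialization only.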
+class BaseAPIClientTest(unittest.TestCase):
+ def setUp(self):
+ self.patcher = mock.patch.multiple(
+ 'docker.api.client.APIClient',
+ get=fake_get,
+ post=fake_post,
+ put=fake_put,
+ delete=fake_delete,
+ _read_from_socket=fake_read_from_socket
+ )
+ self.patcher.start()
+ self.client = APIClient()
+ # Force-clear authconfig so local credentials can't affect the tests
+ self.client._cfg = {'Configs': {}}
+
+ def tearDown(self):
+ self.client.close()
+ self.patcher.stop()
+
+ def base_create_payload(self, img='busybox', cmd=None):
+ if not cmd:
+ cmd = ['true']
+ return {"Tty": False, "Image": img, "Cmd": cmd,
+ "AttachStdin": False,
+ "AttachStderr": True, "AttachStdout": True,
+ "StdinOnce": False,
+ "OpenStdin": False, "NetworkDisabled": False,
+ }
+
+
+class DockerApiTest(BaseAPIClientTest):
+ def test_ctor(self):
+ with pytest.raises(docker.errors.DockerException) as excinfo:
+ APIClient(version=1.12)
+
+ assert str(
+ excinfo.value
+ ) == 'Version parameter must be a string or None. Found float'
+
+ def test_url_valid_resource(self):
+ url = self.client._url('/hello/{0}/world', 'somename')
+ assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
+
+ url = self.client._url(
+ '/hello/{0}/world/{1}', 'somename', 'someothername'
+ )
+ assert url == '{0}{1}'.format(
+ url_prefix, 'hello/somename/world/someothername'
+ )
+
+ url = self.client._url('/hello/{0}/world', 'some?name')
+ assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
+
+ url = self.client._url("/images/{0}/push", "localhost:5000/image")
+ assert url == '{0}{1}'.format(
+ url_prefix, 'images/localhost:5000/image/push'
+ )
+
+ def test_url_invalid_resource(self):
+ with pytest.raises(ValueError):
+ self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
+
+ def test_url_no_resource(self):
+ url = self.client._url('/simple')
+ assert url == '{0}{1}'.format(url_prefix, 'simple')
+
+ def test_url_unversioned_api(self):
+ url = self.client._url(
+ '/hello/{0}/world', 'somename', versioned_api=False
+ )
+ assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
+
+ def test_version(self):
+ self.client.version()
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'version',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_version_no_api_version(self):
+ self.client.version(False)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_base + 'version',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_retrieve_server_version(self):
+ client = APIClient(version="auto")
+ assert isinstance(client._version, six.string_types)
+ assert client._version != "auto"
+ client.close()
+
+ def test_auto_retrieve_server_version(self):
+ version = self.client._retrieve_server_version()
+ assert isinstance(version, six.string_types)
+
+ def test_info(self):
+ self.client.info()
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'info',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_search(self):
+ self.client.search('busybox')
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/search',
+ params={'term': 'busybox'},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_login(self):
+ self.client.login('sakuya', 'izayoi')
+ args = fake_request.call_args
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'auth'
+ assert json.loads(args[1]['data']) == {
+ 'username': 'sakuya', 'password': 'izayoi'
+ }
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert self.client._auth_configs['auths'] == {
+ 'docker.io': {
+ 'email': None,
+ 'password': 'izayoi',
+ 'username': 'sakuya',
+ 'serveraddress': None,
+ }
+ }
+
+ def test_events(self):
+ self.client.events()
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'events',
+ params={'since': None, 'until': None, 'filters': None},
+ stream=True,
+ timeout=None
+ )
+
+ def test_events_with_since_until(self):
+ ts = 1356048000
+ now = datetime.datetime.utcfromtimestamp(ts)
+ since = now - datetime.timedelta(seconds=10)
+ until = now + datetime.timedelta(seconds=10)
+
+ self.client.events(since=since, until=until)
+
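+ # datetime arguments are converted to integer epoch timestamps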
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'events',
+ params={
+ 'since': ts - 10,
+ 'until': ts + 10,
+ 'filters': None
+ },
+ stream=True,
+ timeout=None
+ )
+
+ def test_events_with_filters(self):
+ filters = {'event': ['die', 'stop'],
+ 'container': fake_api.FAKE_CONTAINER_ID}
+
+ self.client.events(filters=filters)
+
+ expected_filters = docker.utils.convert_filters(filters)
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'events',
+ params={
+ 'since': None,
+ 'until': None,
+ 'filters': expected_filters
+ },
+ stream=True,
+ timeout=None
+ )
+
+ def _socket_path_for_client_session(self, client):
+ socket_adapter = client.get_adapter('http+docker://')
+ return socket_adapter.socket_path
+
+ def test_url_compatibility_unix(self):
+ c = APIClient(base_url="unix://socket")
+
+ assert self._socket_path_for_client_session(c) == '/socket'
+
+ def test_url_compatibility_unix_triple_slash(self):
+ c = APIClient(base_url="unix:///socket")
+
+ assert self._socket_path_for_client_session(c) == '/socket'
+
+ def test_url_compatibility_http_unix_triple_slash(self):
+ c = APIClient(base_url="http+unix:///socket")
+
+ assert self._socket_path_for_client_session(c) == '/socket'
+
+ def test_url_compatibility_http(self):
+ c = APIClient(base_url="http://hostname:1234")
+
+ assert c.base_url == "http://hostname:1234"
+
+ def test_url_compatibility_tcp(self):
+ c = APIClient(base_url="tcp://hostname:1234")
+
+ assert c.base_url == "http://hostname:1234"
+
+ def test_remove_link(self):
+ self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
+
+ fake_request.assert_called_with(
+ 'DELETE',
+ url_prefix + 'containers/3cc2351ab11b',
+ params={'v': False, 'link': True, 'force': False},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_create_host_config_secopt(self):
+ security_opt = ['apparmor:test_profile']
+ result = self.client.create_host_config(security_opt=security_opt)
+ assert 'SecurityOpt' in result
+ assert result['SecurityOpt'] == security_opt
+ with pytest.raises(TypeError):
+ self.client.create_host_config(security_opt='wrong')
+
+ def test_stream_helper_decoding(self):
+ status_code, content = fake_api.fake_responses[url_prefix + 'events']()
+ content_str = json.dumps(content)
+ if six.PY3:
+ content_str = content_str.encode('utf-8')
+ body = io.BytesIO(content_str)
+
+ # mock a stream interface
+ raw_resp = urllib3.HTTPResponse(body=body)
+ setattr(raw_resp._fp, 'chunked', True)
+ setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
+
+ # pass `decode=False` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp))
+ assert result == content_str
+
+ # pass `decode=True` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp, decode=True))
+ assert result == content
+
+ # non-chunked response, pass `decode=False` to the helper
+ setattr(raw_resp._fp, 'chunked', False)
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp))
+ assert result == content_str.decode('utf-8')
+
+ # non-chunked response, pass `decode=True` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp, decode=True))
+ assert result == content
+
+
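+# Runs a throwaway Unix socket server in a daemon thread so the
+# streaming code paths are exercised against a real socket.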
+class UnixSocketStreamTest(unittest.TestCase):
+ def setUp(self):
+ socket_dir = tempfile.mkdtemp()
+ self.build_context = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, socket_dir)
+ self.addCleanup(shutil.rmtree, self.build_context)
+ self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
+ self.server_socket = self._setup_socket()
+ self.stop_server = False
+ server_thread = threading.Thread(target=self.run_server)
+ server_thread.setDaemon(True)
+ server_thread.start()
+ self.response = None
+ self.request_handler = None
+ self.addCleanup(server_thread.join)
+ self.addCleanup(self.stop)
+
+ def stop(self):
+ self.stop_server = True
+
+ def _setup_socket(self):
+ server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ server_sock.bind(self.socket_file)
+ # Non-blocking mode so that we can shut the test down easily
+ server_sock.setblocking(0)
+ server_sock.listen(5)
+ return server_sock
+
+ def run_server(self):
+ try:
+ while not self.stop_server:
+ try:
+ connection, client_address = self.server_socket.accept()
+ except socket.error:
+ # Probably no connection to accept yet
+ time.sleep(0.01)
+ continue
+
+ connection.setblocking(1)
+ try:
+ self.request_handler(connection)
+ finally:
+ connection.close()
+ finally:
+ self.server_socket.close()
+
+ def early_response_sending_handler(self, connection):
+ data = b''
+ headers = None
+
+ connection.sendall(self.response)
+ while not headers:
+ data += connection.recv(2048)
+ parts = data.split(b'\r\n\r\n', 1)
+ if len(parts) == 2:
+ headers, data = parts
+
+ mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
+ assert mo
+ content_length = int(mo.group(1))
+
+ while True:
+ if len(data) >= content_length:
+ break
+
+ data += connection.recv(2048)
+
+ @pytest.mark.skipif(
+ docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
+ )
+ def test_early_stream_response(self):
+ self.request_handler = self.early_response_sending_handler
+ lines = []
+ for i in range(0, 50):
+ line = str(i).encode()
+ lines += [('%x' % len(line)).encode(), line]
+ lines.append(b'0')
+ lines.append(b'')
+
+ self.response = (
+ b'HTTP/1.1 200 OK\r\n'
+ b'Transfer-Encoding: chunked\r\n'
+ b'\r\n'
+ ) + b'\r\n'.join(lines)
+
+ with APIClient(base_url="http+unix://" + self.socket_file) as client:
+ for i in range(5):
+ try:
+ stream = client.build(
+ path=self.build_context,
+ )
+ break
+ except requests.ConnectionError as e:
+ if i == 4:
+ raise e
+
+ assert list(stream) == [
+ str(i).encode() for i in range(50)
+ ]
+
+
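+# Serves a raw-stream body after an HTTP 101 Upgrade response,
+# mimicking how the daemon streams attach/exec output over TCP.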
+class TCPSocketStreamTest(unittest.TestCase):
+ text_data = b'''
+ Now, those children out there, they're jumping through the
+ flames in the hope that the god of the fire will make them fruitful.
+ Really, you can't blame them. After all, what girl would not prefer the
+ child of a god to that of some acne-scarred artisan?
+ '''
+
+ def setUp(self):
+ self.server = six.moves.socketserver.ThreadingTCPServer(
+ ('', 0), self.get_handler_class()
+ )
+ self.thread = threading.Thread(target=self.server.serve_forever)
+ self.thread.setDaemon(True)
+ self.thread.start()
+ self.address = 'http://{}:{}'.format(
+ socket.gethostname(), self.server.server_address[1]
+ )
+
+ def tearDown(self):
+ self.server.shutdown()
+ self.server.server_close()
+ self.thread.join()
+
+ def get_handler_class(self):
+ text_data = self.text_data
+
+ class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
+ def do_POST(self):
+ self.send_response(101)
+ self.send_header(
+ 'Content-Type', 'application/vnd.docker.raw-stream'
+ )
+ self.send_header('Connection', 'Upgrade')
+ self.send_header('Upgrade', 'tcp')
+ self.end_headers()
+ self.wfile.flush()
+ time.sleep(0.2)
+ self.wfile.write(text_data)
+ self.wfile.flush()
+
+ return Handler
+
+ def test_read_from_socket(self):
+ with APIClient(base_url=self.address) as client:
+ resp = client._post(client._url('/dummy'), stream=True)
+ data = client._read_from_socket(resp, stream=True, tty=True)
+ results = b''.join(data)
+
+ assert results == self.text_data
+
+
+class UserAgentTest(unittest.TestCase):
+ def setUp(self):
+ self.patcher = mock.patch.object(
+ APIClient,
+ 'send',
+ return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
+ )
+ self.mock_send = self.patcher.start()
+
+ def tearDown(self):
+ self.patcher.stop()
+
+ def test_default_user_agent(self):
+ client = APIClient()
+ client.version()
+
+ assert self.mock_send.call_count == 1
+ headers = self.mock_send.call_args[0][0].headers
+ expected = 'docker-sdk-python/%s' % docker.__version__
+ assert headers['User-Agent'] == expected
+
+ def test_custom_user_agent(self):
+ client = APIClient(user_agent='foo/bar')
+ client.version()
+
+ assert self.mock_send.call_count == 1
+ headers = self.mock_send.call_args[0][0].headers
+ assert headers['User-Agent'] == 'foo/bar'
+
+
+class DisableSocketTest(unittest.TestCase):
+ class DummySocket(object):
+ def __init__(self, timeout=60):
+ self.timeout = timeout
+
+ def settimeout(self, timeout):
+ self.timeout = timeout
+
+ def gettimeout(self):
+ return self.timeout
+
+ def setUp(self):
+ self.client = APIClient()
+
+ def test_disable_socket_timeout(self):
+ """Test that the timeout is disabled on a generic socket object."""
+ socket = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ assert socket.timeout is None
+
+ def test_disable_socket_timeout2(self):
+ """Test that the timeouts are disabled on a generic socket object
+ and its _sock object, if present."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ assert socket.timeout is None
+ assert socket._sock.timeout is None
+
+ def test_disable_socket_timeout_non_blocking(self):
+ """Test that a non-blocking socket does not get set to blocking."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket(0.0)
+
+ self.client._disable_socket_timeout(socket)
+
+ assert socket.timeout is None
+ assert socket._sock.timeout == 0.0
diff --git a/tests/unit/api_volume_test.py b/tests/unit/api_volume_test.py
new file mode 100644
index 0000000..7850c22
--- /dev/null
+++ b/tests/unit/api_volume_test.py
@@ -0,0 +1,115 @@
+import json
+
+import pytest
+
+from ..helpers import requires_api_version
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
+
+
+class VolumeTest(BaseAPIClientTest):
+ def test_list_volumes(self):
+ volumes = self.client.volumes()
+ assert 'Volumes' in volumes
+ assert len(volumes['Volumes']) == 2
+ args = fake_request.call_args
+
+ assert args[0][0] == 'GET'
+ assert args[0][1] == url_prefix + 'volumes'
+
+ def test_list_volumes_and_filters(self):
+ volumes = self.client.volumes(filters={'dangling': True})
+ assert 'Volumes' in volumes
+ assert len(volumes['Volumes']) == 2
+ args = fake_request.call_args
+
+ assert args[0][0] == 'GET'
+ assert args[0][1] == url_prefix + 'volumes'
+ assert args[1] == {'params': {'filters': '{"dangling": ["true"]}'},
+ 'timeout': 60}
+
+ def test_create_volume(self):
+ name = 'perfectcherryblossom'
+ result = self.client.create_volume(name)
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
+ args = fake_request.call_args
+
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'volumes/create'
+ assert json.loads(args[1]['data']) == {'Name': name}
+
+ @requires_api_version('1.23')
+ def test_create_volume_with_labels(self):
+ name = 'perfectcherryblossom'
+ result = self.client.create_volume(name, labels={
+ 'com.example.some-label': 'some-value'
+ })
+ assert result["Labels"] == {
+ 'com.example.some-label': 'some-value'
+ }
+
+ @requires_api_version('1.23')
+ def test_create_volume_with_invalid_labels(self):
+ name = 'perfectcherryblossom'
+ with pytest.raises(TypeError):
+ self.client.create_volume(name, labels=1)
+
+ def test_create_volume_with_driver(self):
+ name = 'perfectcherryblossom'
+ driver_name = 'sshfs'
+ self.client.create_volume(name, driver=driver_name)
+ args = fake_request.call_args
+
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'volumes/create'
+ data = json.loads(args[1]['data'])
+ assert 'Driver' in data
+ assert data['Driver'] == driver_name
+
+ def test_create_volume_invalid_opts_type(self):
+ with pytest.raises(TypeError):
+ self.client.create_volume(
+ 'perfectcherryblossom', driver_opts='hello=world'
+ )
+
+ with pytest.raises(TypeError):
+ self.client.create_volume(
+ 'perfectcherryblossom', driver_opts=['hello=world']
+ )
+
+ with pytest.raises(TypeError):
+ self.client.create_volume(
+ 'perfectcherryblossom', driver_opts=''
+ )
+
+ @requires_api_version('1.24')
+ def test_create_volume_with_no_specified_name(self):
+ result = self.client.create_volume(name=None)
+ assert 'Name' in result
+ assert result['Name'] is not None
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
+ assert 'Scope' in result
+ assert result['Scope'] == 'local'
+
+ def test_inspect_volume(self):
+ name = 'perfectcherryblossom'
+ result = self.client.inspect_volume(name)
+ assert 'Name' in result
+ assert result['Name'] == name
+ assert 'Driver' in result
+ assert result['Driver'] == 'local'
+ args = fake_request.call_args
+
+ assert args[0][0] == 'GET'
+ assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
+
+ def test_remove_volume(self):
+ name = 'perfectcherryblossom'
+ self.client.remove_volume(name)
+ args = fake_request.call_args
+
+ assert args[0][0] == 'DELETE'
+ assert args[0][1] == '{0}volumes/{1}'.format(url_prefix, name)
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
new file mode 100644
index 0000000..947d680
--- /dev/null
+++ b/tests/unit/auth_test.py
@@ -0,0 +1,506 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import json
+import os
+import os.path
+import random
+import shutil
+import tempfile
+import unittest
+
+from docker import auth, errors
+import pytest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class RegressionTest(unittest.TestCase):
+ def test_803_urlsafe_encode(self):
+ auth_data = {
+ 'username': 'root',
+ 'password': 'GR?XGR?XGR?XGR?X'
+ }
+ encoded = auth.encode_header(auth_data)
+ assert b'/' not in encoded
+ assert b'_' in encoded
+
+
+class ResolveRepositoryNameTest(unittest.TestCase):
+ def test_resolve_repository_name_hub_library_image(self):
+ assert auth.resolve_repository_name('image') == (
+ 'docker.io', 'image'
+ )
+
+ def test_resolve_repository_name_dotted_hub_library_image(self):
+ assert auth.resolve_repository_name('image.valid') == (
+ 'docker.io', 'image.valid'
+ )
+
+ def test_resolve_repository_name_hub_image(self):
+ assert auth.resolve_repository_name('username/image') == (
+ 'docker.io', 'username/image'
+ )
+
+ def test_explicit_hub_index_library_image(self):
+ assert auth.resolve_repository_name('docker.io/image') == (
+ 'docker.io', 'image'
+ )
+
+ def test_explicit_legacy_hub_index_library_image(self):
+ assert auth.resolve_repository_name('index.docker.io/image') == (
+ 'docker.io', 'image'
+ )
+
+ def test_resolve_repository_name_private_registry(self):
+ assert auth.resolve_repository_name('my.registry.net/image') == (
+ 'my.registry.net', 'image'
+ )
+
+ def test_resolve_repository_name_private_registry_with_port(self):
+ assert auth.resolve_repository_name('my.registry.net:5000/image') == (
+ 'my.registry.net:5000', 'image'
+ )
+
+ def test_resolve_repository_name_private_registry_with_username(self):
+ assert auth.resolve_repository_name(
+ 'my.registry.net/username/image'
+ ) == ('my.registry.net', 'username/image')
+
+ def test_resolve_repository_name_no_dots_but_port(self):
+ assert auth.resolve_repository_name('hostname:5000/image') == (
+ 'hostname:5000', 'image'
+ )
+
+ def test_resolve_repository_name_no_dots_but_port_and_username(self):
+ assert auth.resolve_repository_name(
+ 'hostname:5000/username/image'
+ ) == ('hostname:5000', 'username/image')
+
+ def test_resolve_repository_name_localhost(self):
+ assert auth.resolve_repository_name('localhost/image') == (
+ 'localhost', 'image'
+ )
+
+ def test_resolve_repository_name_localhost_with_username(self):
+ assert auth.resolve_repository_name('localhost/username/image') == (
+ 'localhost', 'username/image'
+ )
+
+ def test_invalid_index_name(self):
+ with pytest.raises(errors.InvalidRepository):
+ auth.resolve_repository_name('-gecko.com/image')
+
+
+def encode_auth(auth_info):
+ return base64.b64encode(
+ auth_info.get('username', '').encode('utf-8') + b':' +
+ auth_info.get('password', '').encode('utf-8'))
+
+
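+# resolve_authconfig normalizes scheme, path and trailing slashes,
+# so every spelling of a registry resolves to the same credentials.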
+class ResolveAuthTest(unittest.TestCase):
+ index_config = {'auth': encode_auth({'username': 'indexuser'})}
+ private_config = {'auth': encode_auth({'username': 'privateuser'})}
+ legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
+
+ auth_config = {
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': index_config,
+ 'my.registry.net': private_config,
+ 'http://legacy.registry.url/v1/': legacy_config,
+ })
+ }
+
+ def test_resolve_authconfig_hostname_only(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_protocol(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net/v1/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path_trailing_slash(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path_wrong_secure_proto(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://index.docker.io'
+ )['username'] == 'indexuser'
+
+ def test_resolve_authconfig_path_wrong_proto(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net/v1/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_default_registry(self):
+ assert auth.resolve_authconfig(
+ self.auth_config
+ )['username'] == 'indexuser'
+
+ def test_resolve_authconfig_default_explicit_none(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, None
+ )['username'] == 'indexuser'
+
+ def test_resolve_authconfig_fully_explicit(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/v1/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_legacy_config(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'legacy.registry.url'
+ )['username'] == 'legacyauth'
+
+ def test_resolve_authconfig_no_match(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'does.not.exist'
+ ) is None
+
+ def test_resolve_registry_and_auth_library_image(self):
+ image = 'image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_hub_image(self):
+ image = 'username/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_explicit_hub(self):
+ image = 'docker.io/username/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_explicit_legacy_hub(self):
+ image = 'index.docker.io/username/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_private_registry(self):
+ image = 'my.registry.net/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'privateuser'
+
+ def test_resolve_registry_and_auth_unauthenticated_registry(self):
+ image = 'other.registry.net/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ ) is None
+
+ def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
+ auth_config = {
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': self.index_config,
+ }),
+ 'credsStore': 'blackbox'
+ }
+ with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+ m.return_value = None
+ assert 'indexuser' == auth.resolve_authconfig(
+ auth_config, None
+ )['username']
+
+
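+# Per-registry credHelpers entries take precedence over the global
+# credsStore default.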
+class CredStoreTest(unittest.TestCase):
+ def test_get_credential_store(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ }
+
+ assert auth.get_credential_store(
+ auth_config, 'registry1.io'
+ ) == 'truesecret'
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) == 'blackbox'
+
+ def test_get_credential_store_no_default(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ }
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) is None
+
+ def test_get_credential_store_default_index(self):
+ auth_config = {
+ 'credHelpers': {
+ 'https://index.docker.io/v1/': 'powerlock'
+ },
+ 'credsStore': 'truesecret'
+ }
+
+ assert auth.get_credential_store(auth_config, None) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'docker.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'images.io'
+ ) == 'truesecret'
+
+
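+# load_config accepts both legacy .dockercfg and modern config.json
+# files and normalizes them into a single {'auths': ...} mapping.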
+class LoadConfigTest(unittest.TestCase):
+ def test_load_config_no_file(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg = auth.load_config(folder)
+ assert cfg is not None
+
+ def test_load_legacy_config(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg_path = os.path.join(folder, '.dockercfg')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ with open(cfg_path, 'w') as f:
+ f.write('auth = {0}\n'.format(auth_))
+ f.write('email = sakuya@scarlet.net')
+
+ cfg = auth.load_config(cfg_path)
+ assert auth.resolve_authconfig(cfg) is not None
+ assert cfg['auths'][auth.INDEX_NAME] is not None
+ cfg = cfg['auths'][auth.INDEX_NAME]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('Auth') is None
+
+ def test_load_json_config(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg_path = os.path.join(folder, '.dockercfg')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ email = 'sakuya@scarlet.net'
+ with open(cfg_path, 'w') as f:
+ json.dump(
+ {auth.INDEX_URL: {'auth': auth_, 'email': email}}, f
+ )
+ cfg = auth.load_config(cfg_path)
+ assert auth.resolve_authconfig(cfg) is not None
+ assert cfg['auths'][auth.INDEX_URL] is not None
+ cfg = cfg['auths'][auth.INDEX_URL]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == email
+ assert cfg.get('Auth') is None
+
+ def test_load_modern_json_config(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg_path = os.path.join(folder, 'config.json')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ email = 'sakuya@scarlet.net'
+ with open(cfg_path, 'w') as f:
+ json.dump({
+ 'auths': {
+ auth.INDEX_URL: {
+ 'auth': auth_, 'email': email
+ }
+ }
+ }, f)
+ cfg = auth.load_config(cfg_path)
+ assert auth.resolve_authconfig(cfg) is not None
+ assert cfg['auths'][auth.INDEX_URL] is not None
+ cfg = cfg['auths'][auth.INDEX_URL]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == email
+
+ def test_load_config_with_random_name(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder,
+ '.{0}.dockercfg'.format(
+ random.randrange(100000)))
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ config = {
+ registry: {
+ 'auth': '{0}'.format(auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)['auths']
+ assert registry in cfg
+ assert cfg[registry] is not None
+ cfg = cfg[registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_custom_config_env(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ config = {
+ registry: {
+ 'auth': '{0}'.format(auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None)['auths']
+ assert registry in cfg
+ assert cfg[registry] is not None
+ cfg = cfg[registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_custom_config_env_with_auths(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': '{0}'.format(auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None)
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_custom_config_env_utf8(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(
+ b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': '{0}'.format(auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None)
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
+ assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_unknown_keys(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert cfg == {'auths': {}}
+
+ def test_load_config_invalid_auth_dict(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config = {
+ 'auths': {
+ 'scarlet.net': {'sakuya': 'izayoi'}
+ }
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert cfg == {'auths': {'scarlet.net': {}}}
+
+ def test_load_config_identity_token(self):
+ folder = tempfile.mkdtemp()
+ registry = 'scarlet.net'
+ token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce'
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': auth_entry,
+ 'identitytoken': token
+ }
+ }
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert registry in cfg['auths']
+ cfg = cfg['auths'][registry]
+ assert 'IdentityToken' in cfg
+ assert cfg['IdentityToken'] == token
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
new file mode 100644
index 0000000..cce99c5
--- /dev/null
+++ b/tests/unit/client_test.py
@@ -0,0 +1,112 @@
+import datetime
+import docker
+from docker.utils import kwargs_from_env
+from docker.constants import (
+ DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
+)
+import os
+import unittest
+
+from . import fake_api
+import pytest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
+
+
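+# DockerClient wraps a low-level APIClient; patching
+# docker.api.APIClient.<method> lets each test assert the passthrough call
+# and its canned return value without a live daemon.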
+class ClientTest(unittest.TestCase):
+
+ @mock.patch('docker.api.APIClient.events')
+ def test_events(self, mock_func):
+ since = datetime.datetime(2016, 1, 1, 0, 0)
+ mock_func.return_value = fake_api.get_fake_events()[1]
+ client = docker.from_env()
+ assert client.events(since=since) == mock_func.return_value
+ mock_func.assert_called_with(since=since)
+
+ @mock.patch('docker.api.APIClient.info')
+ def test_info(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_info()[1]
+ client = docker.from_env()
+ assert client.info() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.ping')
+ def test_ping(self, mock_func):
+ mock_func.return_value = True
+ client = docker.from_env()
+ assert client.ping() is True
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.version')
+ def test_version(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_version()[1]
+ client = docker.from_env()
+ assert client.version() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ def test_call_api_client_method(self):
+ client = docker.from_env()
+ with pytest.raises(AttributeError) as cm:
+ client.create_container()
+ s = cm.exconly()
+ assert "'DockerClient' object has no attribute 'create_container'" in s
+ assert "this method is now on the object APIClient" in s
+
+ with pytest.raises(AttributeError) as cm:
+ client.abcdef()
+ s = cm.exconly()
+ assert "'DockerClient' object has no attribute 'abcdef'" in s
+ assert "this method is now on the object APIClient" not in s
+
+ def test_call_containers(self):
+ client = docker.DockerClient(**kwargs_from_env())
+
+ with pytest.raises(TypeError) as cm:
+ client.containers()
+
+ s = cm.exconly()
+ assert "'ContainerCollection' object is not callable" in s
+ assert "docker.APIClient" in s
+
+
+class FromEnvTest(unittest.TestCase):
+
+ def setUp(self):
+ self.os_environ = os.environ.copy()
+
+ def tearDown(self):
+ os.environ = self.os_environ
+
+ def test_from_env(self):
+ """Test that environment variables are passed through to
+ utils.kwargs_from_env(). KwargsFromEnvTest tests that environment
+ variables are parsed correctly."""
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='1')
+ client = docker.from_env()
+ assert client.api.base_url == "https://192.168.59.103:2376"
+
+ def test_from_env_with_version(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='1')
+ client = docker.from_env(version='2.32')
+ assert client.api.base_url == "https://192.168.59.103:2376"
+ assert client.api._version == '2.32'
+
+ def test_from_env_without_version_uses_default(self):
+ client = docker.from_env()
+
+ assert client.api._version == DEFAULT_DOCKER_API_VERSION
+
+ def test_from_env_without_timeout_uses_default(self):
+ client = docker.from_env()
+
+ assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
new file mode 100644
index 0000000..2be0578
--- /dev/null
+++ b/tests/unit/dockertypes_test.py
@@ -0,0 +1,470 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+
+import pytest
+
+from docker.constants import DEFAULT_DOCKER_API_VERSION
+from docker.errors import InvalidArgument, InvalidVersion
+from docker.types import (
+ ContainerSpec, EndpointConfig, HostConfig, IPAMConfig,
+ IPAMPool, LogConfig, Mount, ServiceMode, Ulimit,
+)
+from docker.types.services import convert_service_ports
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
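+# Thin wrapper so the tests below exercise HostConfig validation through a
+# single call site.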
+def create_host_config(*args, **kwargs):
+ return HostConfig(*args, **kwargs)
+
+
+class HostConfigTest(unittest.TestCase):
+ def test_create_host_config_no_options_newer_api_version(self):
+ config = create_host_config(version='1.21')
+ assert config['NetworkMode'] == 'default'
+
+ def test_create_host_config_invalid_cpu_cfs_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_quota='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_period='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_quota=23.11)
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.21', cpu_period=1999.0)
+
+ def test_create_host_config_with_cpu_quota(self):
+ config = create_host_config(version='1.21', cpu_quota=1999)
+ assert config.get('CpuQuota') == 1999
+
+ def test_create_host_config_with_cpu_period(self):
+ config = create_host_config(version='1.21', cpu_period=1999)
+ assert config.get('CpuPeriod') == 1999
+
+ def test_create_host_config_with_blkio_constraints(self):
+ blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
+ config = create_host_config(
+ version='1.22', blkio_weight=1999, blkio_weight_device=blkio_rate,
+ device_read_bps=blkio_rate, device_write_bps=blkio_rate,
+ device_read_iops=blkio_rate, device_write_iops=blkio_rate
+ )
+
+ assert config.get('BlkioWeight') == 1999
+ assert config.get('BlkioWeightDevice') is blkio_rate
+ assert config.get('BlkioDeviceReadBps') is blkio_rate
+ assert config.get('BlkioDeviceWriteBps') is blkio_rate
+ assert config.get('BlkioDeviceReadIOps') is blkio_rate
+ assert config.get('BlkioDeviceWriteIOps') is blkio_rate
+ assert blkio_rate[0]['Path'] == "/dev/sda"
+ assert blkio_rate[0]['Rate'] == 1000
+
+ def test_create_host_config_with_shm_size(self):
+ config = create_host_config(version='1.22', shm_size=67108864)
+ assert config.get('ShmSize') == 67108864
+
+ def test_create_host_config_with_shm_size_in_mb(self):
+ config = create_host_config(version='1.22', shm_size='64M')
+ assert config.get('ShmSize') == 67108864
+
+ def test_create_host_config_with_oom_kill_disable(self):
+ config = create_host_config(version='1.21', oom_kill_disable=True)
+ assert config.get('OomKillDisable') is True
+
+ def test_create_host_config_with_userns_mode(self):
+ config = create_host_config(version='1.23', userns_mode='host')
+ assert config.get('UsernsMode') == 'host'
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', userns_mode='host')
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', userns_mode='host12')
+
+ def test_create_host_config_with_oom_score_adj(self):
+ config = create_host_config(version='1.22', oom_score_adj=100)
+ assert config.get('OomScoreAdj') == 100
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.21', oom_score_adj=100)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.22', oom_score_adj='100')
+
+ def test_create_host_config_with_dns_opt(self):
+ tested_opts = ['use-vc', 'no-tld-query']
+ config = create_host_config(version='1.21', dns_opt=tested_opts)
+ dns_opts = config.get('DnsOptions')
+
+ assert 'use-vc' in dns_opts
+ assert 'no-tld-query' in dns_opts
+
+ def test_create_host_config_with_mem_reservation(self):
+ config = create_host_config(version='1.21', mem_reservation=67108864)
+ assert config.get('MemoryReservation') == 67108864
+
+ def test_create_host_config_with_kernel_memory(self):
+ config = create_host_config(version='1.21', kernel_memory=67108864)
+ assert config.get('KernelMemory') == 67108864
+
+ def test_create_host_config_with_pids_limit(self):
+ config = create_host_config(version='1.23', pids_limit=1024)
+ assert config.get('PidsLimit') == 1024
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', pids_limit=1024)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.23', pids_limit='1024')
+
+ def test_create_host_config_with_isolation(self):
+ config = create_host_config(version='1.24', isolation='hyperv')
+ assert config.get('Isolation') == 'hyperv'
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.23', isolation='hyperv')
+ with pytest.raises(TypeError):
+ create_host_config(
+ version='1.24', isolation={'isolation': 'hyperv'}
+ )
+
+ def test_create_host_config_pid_mode(self):
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', pid_mode='baccab125')
+
+ config = create_host_config(version='1.23', pid_mode='host')
+ assert config.get('PidMode') == 'host'
+ config = create_host_config(version='1.24', pid_mode='baccab125')
+ assert config.get('PidMode') == 'baccab125'
+
+ def test_create_host_config_invalid_mem_swappiness(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.24', mem_swappiness='40')
+
+ def test_create_host_config_with_volume_driver(self):
+ config = create_host_config(version='1.21', volume_driver='local')
+ assert config.get('VolumeDriver') == 'local'
+
+ def test_create_host_config_invalid_cpu_count_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_count='1')
+
+ def test_create_host_config_with_cpu_count(self):
+ config = create_host_config(version='1.25', cpu_count=2)
+ assert config.get('CpuCount') == 2
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_count=1)
+
+ def test_create_host_config_invalid_cpu_percent_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_percent='1')
+
+ def test_create_host_config_with_cpu_percent(self):
+ config = create_host_config(version='1.25', cpu_percent=15)
+ assert config.get('CpuPercent') == 15
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_percent=10)
+
+ def test_create_host_config_invalid_nano_cpus_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', nano_cpus='0')
+
+ def test_create_host_config_with_nano_cpus(self):
+ config = create_host_config(version='1.25', nano_cpus=1000)
+ assert config.get('NanoCpus') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', nano_cpus=1)
+
+ def test_create_host_config_with_cpu_rt_period_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_rt_period='1000')
+
+ def test_create_host_config_with_cpu_rt_period(self):
+ config = create_host_config(version='1.25', cpu_rt_period=1000)
+ assert config.get('CPURealtimePeriod') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_rt_period=1000)
+
+ def test_create_host_config_with_cpu_rt_runtime_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_rt_runtime='1000')
+
+ def test_create_host_config_with_cpu_rt_runtime(self):
+ config = create_host_config(version='1.25', cpu_rt_runtime=1000)
+ assert config.get('CPURealtimeRuntime') == 1000
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.24', cpu_rt_runtime=1000)
+
+
+class ContainerSpecTest(unittest.TestCase):
+ def test_parse_mounts(self):
+ spec = ContainerSpec(
+ image='scratch', mounts=[
+ '/local:/container',
+ '/local2:/container2:ro',
+ Mount(target='/target', source='/source')
+ ]
+ )
+
+ assert 'Mounts' in spec
+ assert len(spec['Mounts']) == 3
+ for mount in spec['Mounts']:
+ assert isinstance(mount, Mount)
+
+
+class UlimitTest(unittest.TestCase):
+ def test_create_host_config_dict_ulimit(self):
+ ulimit_dct = {'name': 'nofile', 'soft': 8096}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
+ ulimit_obj = config['Ulimits'][0]
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj.name == ulimit_dct['name']
+ assert ulimit_obj.soft == ulimit_dct['soft']
+ assert ulimit_obj['Soft'] == ulimit_obj.soft
+
+ def test_create_host_config_dict_ulimit_capitals(self):
+ ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
+ ulimit_obj = config['Ulimits'][0]
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj.name == ulimit_dct['Name']
+ assert ulimit_obj.soft == ulimit_dct['Soft']
+ assert ulimit_obj.hard == ulimit_dct['Hard']
+ assert ulimit_obj['Soft'] == ulimit_obj.soft
+
+ def test_create_host_config_obj_ulimit(self):
+ ulimit_dct = Ulimit(name='nofile', soft=8096)
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ assert 'Ulimits' in config
+ assert len(config['Ulimits']) == 1
+ ulimit_obj = config['Ulimits'][0]
+ assert isinstance(ulimit_obj, Ulimit)
+ assert ulimit_obj == ulimit_dct
+
+ def test_ulimit_invalid_type(self):
+ with pytest.raises(ValueError):
+ Ulimit(name=None)
+ with pytest.raises(ValueError):
+ Ulimit(name='hello', soft='123')
+ with pytest.raises(ValueError):
+ Ulimit(name='hello', hard='456')
+
+
+class LogConfigTest(unittest.TestCase):
+ def test_create_host_config_dict_logconfig(self):
+ dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=dct
+ )
+ assert 'LogConfig' in config
+ assert isinstance(config['LogConfig'], LogConfig)
+ assert dct['type'] == config['LogConfig'].type
+
+ def test_create_host_config_obj_logconfig(self):
+ obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=obj
+ )
+ assert 'LogConfig' in config
+ assert isinstance(config['LogConfig'], LogConfig)
+ assert obj == config['LogConfig']
+
+ def test_logconfig_invalid_config_type(self):
+ with pytest.raises(ValueError):
+ LogConfig(type=LogConfig.types.JSON, config='helloworld')
+
+
+class EndpointConfigTest(unittest.TestCase):
+ def test_create_endpoint_config_with_aliases(self):
+ config = EndpointConfig(version='1.22', aliases=['foo', 'bar'])
+ assert config == {'Aliases': ['foo', 'bar']}
+
+ with pytest.raises(InvalidVersion):
+ EndpointConfig(version='1.21', aliases=['foo', 'bar'])
+
+
+class IPAMConfigTest(unittest.TestCase):
+ def test_create_ipam_config(self):
+ ipam_pool = IPAMPool(subnet='192.168.52.0/24',
+ gateway='192.168.52.254')
+
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool])
+ assert ipam_config == {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '192.168.52.0/24',
+ 'Gateway': '192.168.52.254',
+ 'AuxiliaryAddresses': None,
+ 'IPRange': None,
+ }]
+ }
+
+
+class ServiceModeTest(unittest.TestCase):
+ def test_replicated_simple(self):
+ mode = ServiceMode('replicated')
+ assert mode == {'replicated': {}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas is None
+
+ def test_global_simple(self):
+ mode = ServiceMode('global')
+ assert mode == {'global': {}}
+ assert mode.mode == 'global'
+ assert mode.replicas is None
+
+ def test_global_replicas_error(self):
+ with pytest.raises(InvalidArgument):
+ ServiceMode('global', 21)
+
+ def test_replicated_replicas(self):
+ mode = ServiceMode('replicated', 21)
+ assert mode == {'replicated': {'Replicas': 21}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 21
+
+ def test_replicated_replicas_0(self):
+ mode = ServiceMode('replicated', 0)
+ assert mode == {'replicated': {'Replicas': 0}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 0
+
+ def test_invalid_mode(self):
+ with pytest.raises(InvalidArgument):
+ ServiceMode('foobar')
+
+
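+# Mount.parse_mount_string accepts the CLI short form 'source:target[:ro|rw]'.
+# As the cases below show, an absolute source path yields a 'bind' mount and
+# a bare name yields a 'volume' mount.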
+class MountTest(unittest.TestCase):
+ def test_parse_mount_string_ro(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz:ro")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['ReadOnly'] is True
+
+ def test_parse_mount_string_rw(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz:rw")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_short_form(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_no_source(self):
+ mount = Mount.parse_mount_string("foo/bar")
+ assert mount['Source'] is None
+ assert mount['Target'] == "foo/bar"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_invalid(self):
+ with pytest.raises(InvalidArgument):
+ Mount.parse_mount_string("foo:bar:baz:rw")
+
+ def test_parse_mount_named_volume(self):
+ mount = Mount.parse_mount_string("foobar:/baz")
+ assert mount['Source'] == 'foobar'
+ assert mount['Target'] == '/baz'
+ assert mount['Type'] == 'volume'
+
+ def test_parse_mount_bind(self):
+ mount = Mount.parse_mount_string('/foo/bar:/baz')
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['Type'] == 'bind'
+
+ @pytest.mark.xfail
+ def test_parse_mount_bind_windows(self):
+ with mock.patch('docker.types.services.IS_WINDOWS_PLATFORM', True):
+ mount = Mount.parse_mount_string('C:/foo/bar:/baz')
+ assert mount['Source'] == "C:/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['Type'] == 'bind'
+
+
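+# convert_service_ports turns a {published: target} mapping into the API's
+# list-of-dicts form. Each value may be a plain target port, a
+# (target, protocol) pair, or a (target, protocol, mode) triple; the
+# protocol defaults to 'tcp'.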
+class ServicePortsTest(unittest.TestCase):
+ def test_convert_service_ports_simple(self):
+ ports = {8080: 80}
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ }]
+
+ def test_convert_service_ports_with_protocol(self):
+ ports = {8080: (80, 'udp')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'udp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ }]
+
+ def test_convert_service_ports_with_protocol_and_mode(self):
+ ports = {8080: (80, 'udp', 'ingress')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'udp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'ingress',
+ }]
+
+ def test_convert_service_ports_invalid(self):
+ ports = {8080: ('way', 'too', 'many', 'items', 'here')}
+
+ with pytest.raises(ValueError):
+ convert_service_ports(ports)
+
+ def test_convert_service_ports_no_protocol_and_mode(self):
+ ports = {8080: (80, None, 'host')}
+
+ assert convert_service_ports(ports) == [{
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'host',
+ }]
+
+ def test_convert_service_ports_multiple(self):
+ ports = {
+ 8080: (80, None, 'host'),
+ 9999: 99,
+ 2375: (2375,)
+ }
+
+ converted_ports = convert_service_ports(ports)
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 8080,
+ 'TargetPort': 80,
+ 'PublishMode': 'host',
+ } in converted_ports
+
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 9999,
+ 'TargetPort': 99,
+ } in converted_ports
+
+ assert {
+ 'Protocol': 'tcp',
+ 'PublishedPort': 2375,
+ 'TargetPort': 2375,
+ } in converted_ports
+
+ assert len(converted_ports) == 3
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
new file mode 100644
index 0000000..e27a9b1
--- /dev/null
+++ b/tests/unit/errors_test.py
@@ -0,0 +1,133 @@
+import unittest
+
+import requests
+
+from docker.errors import (APIError, ContainerError, DockerException,
+ create_unexpected_kwargs_error,
+ create_api_error_from_http_exception)
+from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID
+from .fake_api_client import make_fake_client
+
+
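+# APIError classifies by HTTP status: is_client_error() is True only for
+# 4xx responses and is_server_error() only for 5xx, as the cases below show.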
+class APIErrorTest(unittest.TestCase):
+ def test_api_error_is_caught_by_dockerexception(self):
+ try:
+ raise APIError("this should be caught by DockerException")
+ except DockerException:
+ pass
+
+ def test_status_code_200(self):
+ """The status_code property is present with 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.status_code == 200
+
+ def test_status_code_400(self):
+ """The status_code property is present with 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.status_code == 400
+
+ def test_status_code_500(self):
+ """The status_code property is present with 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.status_code == 500
+
+ def test_is_server_error_200(self):
+ """Report not server error on 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_300(self):
+ """Report not server error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_400(self):
+ """Report not server error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_500(self):
+ """Report server error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_server_error() is True
+
+ def test_is_client_error_500(self):
+ """Report not client error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_client_error() is False
+
+ def test_is_client_error_400(self):
+ """Report client error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_client_error() is True
+
+ def test_create_error_from_exception(self):
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('')
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ try:
+ create_api_error_from_http_exception(e)
+ except APIError as e:
+ err = e
+ assert err.is_server_error() is True
+
+
+class ContainerErrorTest(unittest.TestCase):
+ def test_container_without_stderr(self):
+ """The massage does not contain stderr"""
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ command = "echo Hello World"
+ exit_status = 42
+ image = FAKE_IMAGE_ID
+ stderr = None
+
+ err = ContainerError(container, exit_status, command, image, stderr)
+ msg = ("Command '{}' in image '{}' returned non-zero exit status {}"
+ ).format(command, image, exit_status, stderr)
+ assert str(err) == msg
+
+ def test_container_with_stderr(self):
+ """The massage contains stderr"""
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ command = "echo Hello World"
+ exit_status = 42
+ image = FAKE_IMAGE_ID
+ stderr = "Something went wrong"
+
+ err = ContainerError(container, exit_status, command, image, stderr)
+ msg = ("Command '{}' in image '{}' returned non-zero exit status {}: "
+ "{}").format(command, image, exit_status, stderr)
+ assert str(err) == msg
+
+
+class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
+ def test_create_unexpected_kwargs_error_single(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
+ assert str(e) == "f() got an unexpected keyword argument 'foo'"
+
+ def test_create_unexpected_kwargs_error_multiple(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
+ assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
new file mode 100644
index 0000000..e609b64
--- /dev/null
+++ b/tests/unit/fake_api.py
@@ -0,0 +1,645 @@
+from . import fake_stat
+from docker import constants
+
+CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
+
+FAKE_CONTAINER_ID = '3cc2351ab11b'
+FAKE_IMAGE_ID = 'e9aa60c60128'
+FAKE_EXEC_ID = 'd5d177f121dc'
+FAKE_NETWORK_ID = '33fb6a3462b8'
+FAKE_IMAGE_NAME = 'test_image'
+FAKE_TARBALL_PATH = '/path/to/tarball'
+FAKE_REPO_NAME = 'repo'
+FAKE_TAG_NAME = 'tag'
+FAKE_FILE_NAME = 'file'
+FAKE_URL = 'myurl'
+FAKE_PATH = '/path'
+FAKE_VOLUME_NAME = 'perfectcherryblossom'
+FAKE_NODE_ID = '24ifsmvkjbyhk'
+
+# Each method is prefixed with HTTP method (get, post...)
+# for clarity and readability
+
+
+def get_fake_version():
+ status_code = 200
+ response = {
+ 'ApiVersion': '1.35',
+ 'Arch': 'amd64',
+ 'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
+ 'Components': [{
+ 'Details': {
+ 'ApiVersion': '1.35',
+ 'Arch': 'amd64',
+ 'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
+ 'Experimental': 'false',
+ 'GitCommit': '03596f5',
+ 'GoVersion': 'go1.9.2',
+ 'KernelVersion': '4.4.0-112-generic',
+ 'MinAPIVersion': '1.12',
+ 'Os': 'linux'
+ },
+ 'Name': 'Engine',
+ 'Version': '18.01.0-ce'
+ }],
+ 'GitCommit': '03596f5',
+ 'GoVersion': 'go1.9.2',
+ 'KernelVersion': '4.4.0-112-generic',
+ 'MinAPIVersion': '1.12',
+ 'Os': 'linux',
+ 'Platform': {'Name': ''},
+ 'Version': '18.01.0-ce'
+ }
+
+ return status_code, response
+
+
+def get_fake_info():
+ status_code = 200
+ response = {'Containers': 1, 'Images': 1, 'Debug': False,
+ 'MemoryLimit': False, 'SwapLimit': False,
+ 'IPv4Forwarding': True}
+ return status_code, response
+
+
+def post_fake_auth():
+ status_code = 200
+ response = {'Status': 'Login Succeeded',
+ 'IdentityToken': '9cbaf023786cd7'}
+ return status_code, response
+
+
+def get_fake_ping():
+ return 200, "OK"
+
+
+def get_fake_search():
+ status_code = 200
+ response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
+ return status_code, response
+
+
+def get_fake_images():
+ status_code = 200
+ response = [{
+ 'Id': FAKE_IMAGE_ID,
+ 'Created': '2 days ago',
+ 'Repository': 'busybox',
+ 'RepoTags': ['busybox:latest', 'busybox:1.0'],
+ }]
+ return status_code, response
+
+
+def get_fake_image_history():
+ status_code = 200
+ response = [
+ {
+ "Id": "b750fe79269d",
+ "Created": 1364102658,
+ "CreatedBy": "/bin/bash"
+ },
+ {
+ "Id": "27cf78414709",
+ "Created": 1364068391,
+ "CreatedBy": ""
+ }
+ ]
+
+ return status_code, response
+
+
+def post_fake_import_image():
+ status_code = 200
+ response = 'Import messages...'
+
+ return status_code, response
+
+
+def get_fake_containers():
+ status_code = 200
+ response = [{
+ 'Id': FAKE_CONTAINER_ID,
+ 'Image': 'busybox:latest',
+ 'Created': '2 days ago',
+ 'Command': 'true',
+ 'Status': 'fake status'
+ }]
+ return status_code, response
+
+
+def post_fake_start_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_resize_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_create_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def get_fake_inspect_container(tty=False):
+ status_code = 200
+ response = {
+ 'Id': FAKE_CONTAINER_ID,
+ 'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty},
+ 'ID': FAKE_CONTAINER_ID,
+ 'Image': 'busybox:latest',
+ 'Name': 'foobar',
+ "State": {
+ "Status": "running",
+ "Running": True,
+ "Pid": 0,
+ "ExitCode": 0,
+ "StartedAt": "2013-09-25T14:01:18.869545111+02:00",
+ "Ghost": False
+ },
+ "HostConfig": {
+ "LogConfig": {
+ "Type": "json-file",
+ "Config": {}
+ },
+ },
+ "MacAddress": "02:42:ac:11:00:0a"
+ }
+ return status_code, response
+
+
+def get_fake_inspect_image():
+ status_code = 200
+ response = {
+ 'Id': FAKE_IMAGE_ID,
+ 'Parent': "27cf784147099545",
+ 'Created': "2013-03-23T22:24:18.818426-07:00",
+ 'Container': FAKE_CONTAINER_ID,
+ 'Config': {'Labels': {'bar': 'foo'}},
+ 'ContainerConfig':
+ {
+ "Hostname": "",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "AttachStdin": False,
+ "AttachStdout": False,
+ "AttachStderr": False,
+ "PortSpecs": "",
+ "Tty": True,
+ "OpenStdin": True,
+ "StdinOnce": False,
+ "Env": "",
+ "Cmd": ["/bin/bash"],
+ "Dns": "",
+ "Image": "base",
+ "Volumes": "",
+ "VolumesFrom": "",
+ "WorkingDir": ""
+ },
+ 'Size': 6823592
+ }
+ return status_code, response
+
+
+def get_fake_insert_image():
+ status_code = 200
+ response = {'StatusCode': 0}
+ return status_code, response
+
+
+def get_fake_wait():
+ status_code = 200
+ response = {'StatusCode': 0}
+ return status_code, response
+
+
+def get_fake_logs():
+ status_code = 200
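+ # Docker's multiplexed stream framing: each frame carries an 8-byte
+ # header -- one byte for the stream id (1 = stdout, 2 = stderr), three
+ # padding bytes, then a big-endian 4-byte payload length.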
+ response = (b'\x01\x00\x00\x00\x00\x00\x00\x00'
+ b'\x02\x00\x00\x00\x00\x00\x00\x00'
+ b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
+ b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
+ return status_code, response
+
+
+def get_fake_diff():
+ status_code = 200
+ response = [{'Path': '/test', 'Kind': 1}]
+ return status_code, response
+
+
+def get_fake_events():
+ status_code = 200
+ response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID,
+ 'from': FAKE_IMAGE_ID, 'time': 1423247867}]
+ return status_code, response
+
+
+def get_fake_export():
+ status_code = 200
+ response = 'Byte Stream....'
+ return status_code, response
+
+
+def post_fake_exec_create():
+ status_code = 200
+ response = {'Id': FAKE_EXEC_ID}
+ return status_code, response
+
+
+def post_fake_exec_start():
+ status_code = 200
+ response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n'
+ b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n'
+ b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n')
+ return status_code, response
+
+
+def post_fake_exec_resize():
+ status_code = 201
+ return status_code, ''
+
+
+def get_fake_exec_inspect():
+ return 200, {
+ 'OpenStderr': True,
+ 'OpenStdout': True,
+ 'Container': get_fake_inspect_container()[1],
+ 'Running': False,
+ 'ProcessConfig': {
+ 'arguments': ['hello world'],
+ 'tty': False,
+ 'entrypoint': 'echo',
+ 'privileged': False,
+ 'user': ''
+ },
+ 'ExitCode': 0,
+ 'ID': FAKE_EXEC_ID,
+ 'OpenStdin': False
+ }
+
+
+def post_fake_stop_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_kill_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_pause_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_unpause_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_restart_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_rename_container():
+ status_code = 204
+ return status_code, None
+
+
+def delete_fake_remove_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_image_create():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def delete_fake_remove_image():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def get_fake_get_image():
+ status_code = 200
+ response = 'Byte Stream....'
+ return status_code, response
+
+
+def post_fake_load_image():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def post_fake_commit():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_push():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def post_fake_build_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_tag_image():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def get_fake_stats():
+ status_code = 200
+ response = fake_stat.OBJ
+ return status_code, response
+
+
+def get_fake_top():
+ return 200, {
+ 'Processes': [
+ [
+ 'root',
+ '26501',
+ '6907',
+ '0',
+ '10:32',
+ 'pts/55',
+ '00:00:00',
+ 'sleep 60',
+ ],
+ ],
+ 'Titles': [
+ 'UID',
+ 'PID',
+ 'PPID',
+ 'C',
+ 'STIME',
+ 'TTY',
+ 'TIME',
+ 'CMD',
+ ],
+ }
+
+
+def get_fake_volume_list():
+ status_code = 200
+ response = {
+ 'Volumes': [
+ {
+ 'Name': 'perfectcherryblossom',
+ 'Driver': 'local',
+ 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
+ 'Scope': 'local'
+ }, {
+ 'Name': 'subterraneananimism',
+ 'Driver': 'local',
+ 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism',
+ 'Scope': 'local'
+ }
+ ]
+ }
+ return status_code, response
+
+
+def get_fake_volume():
+ status_code = 200
+ response = {
+ 'Name': 'perfectcherryblossom',
+ 'Driver': 'local',
+ 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
+ 'Labels': {
+ 'com.example.some-label': 'some-value'
+ },
+ 'Scope': 'local'
+ }
+ return status_code, response
+
+
+def fake_remove_volume():
+ return 204, None
+
+
+def post_fake_update_container():
+ return 200, {'Warnings': []}
+
+
+def post_fake_update_node():
+ return 200, None
+
+
+def post_fake_join_swarm():
+ return 200, None
+
+
+def get_fake_network_list():
+ return 200, [{
+ "Name": "bridge",
+ "Id": FAKE_NETWORK_ID,
+ "Scope": "local",
+ "Driver": "bridge",
+ "EnableIPv6": False,
+ "Internal": False,
+ "IPAM": {
+ "Driver": "default",
+ "Config": [
+ {
+ "Subnet": "172.17.0.0/16"
+ }
+ ]
+ },
+ "Containers": {
+ FAKE_CONTAINER_ID: {
+ "EndpointID": "ed2419a97c1d99",
+ "MacAddress": "02:42:ac:11:00:02",
+ "IPv4Address": "172.17.0.2/16",
+ "IPv6Address": ""
+ }
+ },
+ "Options": {
+ "com.docker.network.bridge.default_bridge": "true",
+ "com.docker.network.bridge.enable_icc": "true",
+ "com.docker.network.bridge.enable_ip_masquerade": "true",
+ "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+ "com.docker.network.bridge.name": "docker0",
+ "com.docker.network.driver.mtu": "1500"
+ }
+ }]
+
+
+def get_fake_network():
+ return 200, get_fake_network_list()[1][0]
+
+
+def post_fake_network():
+ return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
+
+
+def delete_fake_network():
+ return 204, None
+
+
+def post_fake_network_connect():
+ return 200, None
+
+
+def post_fake_network_disconnect():
+ return 200, None
+
+
+# Maps real API URLs to fake response callbacks
+prefix = 'http+docker://localhost'
+if constants.IS_WINDOWS_PLATFORM:
+ prefix = 'http+docker://localnpipe'
+
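+# Keys are either a bare URL or, where one endpoint must behave differently
+# per HTTP verb, a (url, method) tuple.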
+fake_responses = {
+ '{0}/version'.format(prefix):
+ get_fake_version,
+ '{1}/{0}/version'.format(CURRENT_VERSION, prefix):
+ get_fake_version,
+ '{1}/{0}/info'.format(CURRENT_VERSION, prefix):
+ get_fake_info,
+ '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
+ post_fake_auth,
+ '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
+ get_fake_ping,
+ '{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
+ get_fake_search,
+ '{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
+ get_fake_images,
+ '{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
+ get_fake_image_history,
+ '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
+ post_fake_import_image,
+ '{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
+ get_fake_containers,
+ '{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
+ post_fake_start_container,
+ '{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
+ post_fake_resize_container,
+ '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
+ get_fake_inspect_container,
+ '{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
+ post_fake_rename_container,
+ '{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
+ post_fake_tag_image,
+ '{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
+ get_fake_wait,
+ '{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
+ get_fake_logs,
+ '{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
+ get_fake_diff,
+ '{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
+ get_fake_export,
+ '{1}/{0}/containers/3cc2351ab11b/update'.format(CURRENT_VERSION, prefix):
+ post_fake_update_container,
+ '{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
+ post_fake_exec_create,
+ '{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix):
+ post_fake_exec_start,
+ '{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix):
+ get_fake_exec_inspect,
+ '{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix):
+ post_fake_exec_resize,
+
+ '{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
+ get_fake_stats,
+ '{1}/{0}/containers/3cc2351ab11b/top'.format(CURRENT_VERSION, prefix):
+ get_fake_top,
+ '{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
+ post_fake_stop_container,
+ '{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
+ post_fake_kill_container,
+ '{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
+ post_fake_pause_container,
+ '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
+ post_fake_unpause_container,
+ '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
+ post_fake_restart_container,
+ '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
+ delete_fake_remove_container,
+ '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
+ post_fake_image_create,
+ '{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
+ delete_fake_remove_image,
+ '{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
+ get_fake_get_image,
+ '{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
+ post_fake_load_image,
+ '{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
+ get_fake_inspect_image,
+ '{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
+ get_fake_insert_image,
+ '{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
+ post_fake_push,
+ '{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
+ post_fake_commit,
+ '{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
+ post_fake_create_container,
+ '{1}/{0}/build'.format(CURRENT_VERSION, prefix):
+ post_fake_build_container,
+ '{1}/{0}/events'.format(CURRENT_VERSION, prefix):
+ get_fake_events,
+ ('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'GET'):
+ get_fake_volume_list,
+ ('{1}/{0}/volumes/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ get_fake_volume,
+ ('{1}/{0}/volumes/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
+ ), 'GET'):
+ get_fake_volume,
+ ('{1}/{0}/volumes/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
+ ), 'DELETE'):
+ fake_remove_volume,
+ ('{1}/{0}/nodes/{2}/update?version=1'.format(
+ CURRENT_VERSION, prefix, FAKE_NODE_ID
+ ), 'POST'):
+ post_fake_update_node,
+ ('{1}/{0}/swarm/join'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_join_swarm,
+ ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
+ get_fake_network_list,
+ ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'GET'):
+ get_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'DELETE'):
+ delete_fake_network,
+ ('{1}/{0}/networks/{2}/connect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_connect,
+ ('{1}/{0}/networks/{2}/disconnect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_disconnect,
+}
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
new file mode 100644
index 0000000..2147bfd
--- /dev/null
+++ b/tests/unit/fake_api_client.py
@@ -0,0 +1,67 @@
+import copy
+import docker
+
+from . import fake_api
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class CopyReturnMagicMock(mock.MagicMock):
+ """
+ A MagicMock which deep copies every return value.
+ """
+ def _mock_call(self, *args, **kwargs):
+ ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
+ if isinstance(ret, (dict, list)):
+ ret = copy.deepcopy(ret)
+ return ret
+
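+# Without the deep copy, a test that mutates a returned dict would leak the
+# change into every later call. A minimal sketch of the guarantee
+# (hypothetical usage, not part of the suite):
+#
+#     m = CopyReturnMagicMock(return_value={'a': 1})
+#     m()['a'] = 2
+#     assert m()['a'] == 1  # each call yields a fresh copy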
+
+def make_fake_api_client(overrides=None):
+ """
+ Returns an incomplete fake APIClient.
+
+ This handles most of the default cases correctly, but most arguments
+ that change behaviour will not work.
+ """
+
+ if overrides is None:
+ overrides = {}
+ api_client = docker.APIClient()
+ mock_attrs = {
+ 'build.return_value': fake_api.FAKE_IMAGE_ID,
+ 'commit.return_value': fake_api.post_fake_commit()[1],
+ 'containers.return_value': fake_api.get_fake_containers()[1],
+ 'create_container.return_value':
+ fake_api.post_fake_create_container()[1],
+ 'create_host_config.side_effect': api_client.create_host_config,
+ 'create_network.return_value': fake_api.post_fake_network()[1],
+ 'exec_create.return_value': fake_api.post_fake_exec_create()[1],
+ 'exec_start.return_value': fake_api.post_fake_exec_start()[1],
+ 'images.return_value': fake_api.get_fake_images()[1],
+ 'inspect_container.return_value':
+ fake_api.get_fake_inspect_container()[1],
+ 'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
+ 'inspect_network.return_value': fake_api.get_fake_network()[1],
+ 'logs.return_value': [b'hello world\n'],
+ 'networks.return_value': fake_api.get_fake_network_list()[1],
+ 'start.return_value': None,
+ 'wait.return_value': {'StatusCode': 0},
+ }
+ mock_attrs.update(overrides)
+ mock_client = CopyReturnMagicMock(**mock_attrs)
+
+ mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
+ return mock_client
+
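+# Tests can override individual endpoints on the mock returned above, e.g.
+# (hypothetical usage):
+#
+#     api = make_fake_api_client({'wait.return_value': {'StatusCode': 1}})
+#     assert api.wait(FAKE_CONTAINER_ID) == {'StatusCode': 1}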
+
+def make_fake_client(overrides=None):
+ """
+ Returns a DockerClient with a fake APIClient.
+ """
+ client = docker.DockerClient()
+ client.api = make_fake_api_client(overrides)
+ return client
diff --git a/tests/unit/fake_stat.py b/tests/unit/fake_stat.py
new file mode 100644
index 0000000..a7f1029
--- /dev/null
+++ b/tests/unit/fake_stat.py
@@ -0,0 +1,133 @@
+OBJ = {
+ "read": "2015-02-11T19:20:46.667237763+02:00",
+ "network": {
+ "rx_bytes": 567224,
+ "rx_packets": 3773,
+ "rx_errors": 0,
+ "rx_dropped": 0,
+ "tx_bytes": 1176,
+ "tx_packets": 13,
+ "tx_errors": 0,
+ "tx_dropped": 0
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "total_usage": 157260874053,
+ "percpu_usage": [
+ 52196306950,
+ 24118413549,
+ 53292684398,
+ 27653469156
+ ],
+ "usage_in_kernelmode": 37140000000,
+ "usage_in_usermode": 62140000000
+ },
+ "system_cpu_usage": 3.0881377e+14,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "memory_stats": {
+ "usage": 179314688,
+ "max_usage": 258166784,
+ "stats": {
+ "active_anon": 90804224,
+ "active_file": 2195456,
+ "cache": 3096576,
+ "hierarchical_memory_limit": 1.844674407371e+19,
+ "inactive_anon": 85516288,
+ "inactive_file": 798720,
+ "mapped_file": 2646016,
+ "pgfault": 101034,
+ "pgmajfault": 1207,
+ "pgpgin": 115814,
+ "pgpgout": 75613,
+ "rss": 176218112,
+ "rss_huge": 12582912,
+ "total_active_anon": 90804224,
+ "total_active_file": 2195456,
+ "total_cache": 3096576,
+ "total_inactive_anon": 85516288,
+ "total_inactive_file": 798720,
+ "total_mapped_file": 2646016,
+ "total_pgfault": 101034,
+ "total_pgmajfault": 1207,
+ "total_pgpgin": 115814,
+ "total_pgpgout": 75613,
+ "total_rss": 176218112,
+ "total_rss_huge": 12582912,
+ "total_unevictable": 0,
+ "total_writeback": 0,
+ "unevictable": 0,
+ "writeback": 0
+ },
+ "failcnt": 0,
+ "limit": 8039038976
+ },
+ "blkio_stats": {
+ "io_service_bytes_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 72843264
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 4096
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 4096
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 72843264
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 72847360
+ }
+ ],
+ "io_serviced_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 10581
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 1
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 1
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 10581
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 10582
+ }
+ ],
+ "io_queue_recursive": [],
+ "io_service_time_recursive": [],
+ "io_wait_time_recursive": [],
+ "io_merged_recursive": [],
+ "io_time_recursive": [],
+ "sectors_recursive": []
+ }
+}
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
new file mode 100644
index 0000000..48a5288
--- /dev/null
+++ b/tests/unit/models_containers_test.py
@@ -0,0 +1,550 @@
+import docker
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
+from docker.models.containers import Container, _create_container_args
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID, FAKE_EXEC_ID
+from .fake_api_client import make_fake_client
+import pytest
+
+
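+# ContainerCollection.run composes the low-level create_container / start /
+# wait / logs calls; the fake client lets each test assert them individually.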
+class ContainerCollectionTest(unittest.TestCase):
+ def test_run(self):
+ client = make_fake_client()
+ out = client.containers.run("alpine", "echo hello world")
+
+ assert out == b'hello world\n'
+
+ client.api.create_container.assert_called_with(
+ image="alpine",
+ command="echo hello world",
+ detach=False,
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.logs.assert_called_with(
+ FAKE_CONTAINER_ID, stderr=False, stdout=True, stream=True,
+ follow=True
+ )
+
+ def test_create_container_args(self):
+ create_kwargs = _create_container_args(dict(
+ image='alpine',
+ command='echo hello world',
+ blkio_weight_device=[{'Path': 'foo', 'Weight': 3}],
+ blkio_weight=2,
+ cap_add=['foo'],
+ cap_drop=['bar'],
+ cgroup_parent='foobar',
+ cpu_period=1,
+ cpu_quota=2,
+ cpu_shares=5,
+ cpuset_cpus='0-3',
+ detach=False,
+ device_read_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_read_iops=[{'Path': 'foo', 'Rate': 3}],
+ device_write_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_write_iops=[{'Path': 'foo', 'Rate': 3}],
+ devices=['/dev/sda:/dev/xvda:rwm'],
+ dns=['8.8.8.8'],
+ domainname='example.com',
+ dns_opt=['foo'],
+ dns_search=['example.com'],
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ extra_hosts={'foo': '1.2.3.4'},
+ group_add=['blah'],
+ ipc_mode='foo',
+ kernel_memory=123,
+ labels={'key': 'value'},
+ links={'foo': 'bar'},
+ log_config={'Type': 'json-file', 'Config': {}},
+ lxc_conf={'foo': 'bar'},
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ mac_address='abc123',
+ mem_limit=123,
+ mem_reservation=123,
+ mem_swappiness=2,
+ memswap_limit=456,
+ name='somename',
+ network_disabled=False,
+ network='foo',
+ oom_kill_disable=True,
+ oom_score_adj=5,
+ pid_mode='host',
+ pids_limit=500,
+ ports={
+ 1111: 4567,
+ 2222: None
+ },
+ privileged=True,
+ publish_all_ports=True,
+ read_only=True,
+ restart_policy={'Name': 'always'},
+ security_opt=['blah'],
+ shm_size=123,
+ stdin_open=True,
+ stop_signal=9,
+ sysctls={'foo': 'bar'},
+ tmpfs={'/blah': ''},
+ tty=True,
+ ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ user='bob',
+ userns_mode='host',
+ version='1.23',
+ volume_driver='some_driver',
+ volumes=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro',
+ 'C:\\windows\\path:D:\\hello\\world:rw'
+ ],
+ volumes_from=['container'],
+ working_dir='/code'
+ ))
+
+ expected = dict(
+ image='alpine',
+ command='echo hello world',
+ domainname='example.com',
+ detach=False,
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ host_config={
+ 'Binds': [
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro',
+ 'C:\\windows\\path:D:\\hello\\world:rw'
+ ],
+ 'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioWeightDevice': [{'Path': 'foo', 'Weight': 3}],
+ 'BlkioWeight': 2,
+ 'CapAdd': ['foo'],
+ 'CapDrop': ['bar'],
+ 'CgroupParent': 'foobar',
+ 'CpuPeriod': 1,
+ 'CpuQuota': 2,
+ 'CpuShares': 5,
+ 'CpusetCpus': '0-3',
+ 'Devices': [{'PathOnHost': '/dev/sda',
+ 'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/xvda'}],
+ 'Dns': ['8.8.8.8'],
+ 'DnsOptions': ['foo'],
+ 'DnsSearch': ['example.com'],
+ 'ExtraHosts': ['foo:1.2.3.4'],
+ 'GroupAdd': ['blah'],
+ 'IpcMode': 'foo',
+ 'KernelMemory': 123,
+ 'Links': ['foo:bar'],
+ 'LogConfig': {'Type': 'json-file', 'Config': {}},
+ 'LxcConf': [{'Key': 'foo', 'Value': 'bar'}],
+ 'Memory': 123,
+ 'MemoryReservation': 123,
+ 'MemorySwap': 456,
+ 'MemorySwappiness': 2,
+ 'NetworkMode': 'foo',
+ 'OomKillDisable': True,
+ 'OomScoreAdj': 5,
+ 'PidMode': 'host',
+ 'PidsLimit': 500,
+ 'PortBindings': {
+ '1111/tcp': [{'HostIp': '', 'HostPort': '4567'}],
+ '2222/tcp': [{'HostIp': '', 'HostPort': ''}]
+ },
+ 'Privileged': True,
+ 'PublishAllPorts': True,
+ 'ReadonlyRootfs': True,
+ 'RestartPolicy': {'Name': 'always'},
+ 'SecurityOpt': ['blah'],
+ 'ShmSize': 123,
+ 'Sysctls': {'foo': 'bar'},
+ 'Tmpfs': {'/blah': ''},
+ 'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ 'UsernsMode': 'host',
+ 'VolumesFrom': ['container'],
+ },
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ labels={'key': 'value'},
+ mac_address='abc123',
+ name='somename',
+ network_disabled=False,
+ networking_config={'foo': None},
+ ports=[('1111', 'tcp'), ('2222', 'tcp')],
+ stdin_open=True,
+ stop_signal=9,
+ tty=True,
+ user='bob',
+ volume_driver='some_driver',
+ volumes=[
+ '/mnt/vol2',
+ '/mnt/vol1',
+ '/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath',
+ 'D:\\hello\\world'
+ ],
+ working_dir='/code'
+ )
+
+ assert create_kwargs == expected
+
+ def test_run_detach(self):
+ client = make_fake_client()
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='sleep 300',
+ detach=True,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_run_pull(self):
+ client = make_fake_client()
+
+ # raise exception on first call, then return normal value
+ client.api.create_container.side_effect = [
+ docker.errors.ImageNotFound(""),
+ client.api.create_container.return_value
+ ]
+
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.pull.assert_called_with('alpine', platform=None, tag=None)
+
+ def test_run_with_error(self):
+ client = make_fake_client()
+ client.api.logs.return_value = "some error"
+ client.api.wait.return_value = {'StatusCode': 1}
+
+ with pytest.raises(docker.errors.ContainerError) as cm:
+ client.containers.run('alpine', 'echo hello world')
+ assert cm.value.exit_status == 1
+ assert "some error" in cm.exconly()
+
+ def test_run_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.run(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ detach=False,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+
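+ # remove=True is honoured client-side after a foreground run; combined
+ # with detach=True it requires API >= 1.25, where it becomes the
+ # AutoRemove host-config flag instead.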
+ def test_run_remove(self):
+ client = make_fake_client()
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.api.wait.return_value = {'StatusCode': 1}
+ with pytest.raises(docker.errors.ContainerError):
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ client.api.wait.return_value = {'StatusCode': 1}
+ with pytest.raises(docker.errors.ContainerError):
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ client.api._version = '1.24'
+ with pytest.raises(RuntimeError):
+ client.containers.run("alpine", detach=True, remove=True)
+
+ client = make_fake_client()
+ client.api._version = '1.23'
+ with pytest.raises(RuntimeError):
+ client.containers.run("alpine", detach=True, remove=True)
+
+ client = make_fake_client()
+ client.api._version = '1.25'
+ client.containers.run("alpine", detach=True, remove=True)
+ client.api.remove_container.assert_not_called()
+ client.api.create_container.assert_called_with(
+ command=None,
+ image='alpine',
+ detach=True,
+ host_config={'AutoRemove': True,
+ 'NetworkMode': 'default'}
+ )
+
+ client = make_fake_client()
+ client.api._version = '1.26'
+ client.containers.run("alpine", detach=True, remove=True)
+ client.api.remove_container.assert_not_called()
+ client.api.create_container.assert_called_with(
+ command=None,
+ image='alpine',
+ detach=True,
+ host_config={'AutoRemove': True,
+ 'NetworkMode': 'default'}
+ )
+
+ def test_create(self):
+ client = make_fake_client()
+ container = client.containers.create(
+ 'alpine',
+ 'echo hello world',
+ environment={'FOO': 'BAR'}
+ )
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='echo hello world',
+ environment={'FOO': 'BAR'},
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_create_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.create(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ host_config={'NetworkMode': 'default'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ containers = client.containers.list(all=True)
+ client.api.containers.assert_called_with(
+ all=True,
+ before=None,
+ filters=None,
+ limit=-1,
+ since=None
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ assert len(containers) == 1
+ assert isinstance(containers[0], Container)
+ assert containers[0].id == FAKE_CONTAINER_ID
+
+ def test_list_ignore_removed(self):
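+ # A container can disappear between the list and inspect calls;
+ # ignore_removed=True should swallow the resulting NotFound.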
+ def side_effect(*args, **kwargs):
+ raise docker.errors.NotFound('Container not found')
+ client = make_fake_client({
+ 'inspect_container.side_effect': side_effect
+ })
+
+ with pytest.raises(docker.errors.NotFound):
+ client.containers.list(all=True, ignore_removed=False)
+
+ assert client.containers.list(all=True, ignore_removed=True) == []
+
+
+class ContainerTest(unittest.TestCase):
+ def test_name(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.name == 'foobar'
+
+ def test_status(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.status == "running"
+
+ def test_attach(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attach(stream=True)
+ client.api.attach.assert_called_with(FAKE_CONTAINER_ID, stream=True)
+
+ def test_commit(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ image = container.commit()
+ client.api.commit.assert_called_with(FAKE_CONTAINER_ID,
+ repository=None,
+ tag=None)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_diff(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.diff()
+ client.api.diff.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_exec_run(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("echo hello world", privileged=True, stream=True)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user='', environment=None,
+ workdir=None
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+ )
+
+ def test_exec_run_failure(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("docker ps", privileged=True, stream=False)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user='', environment=None,
+ workdir=None
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False
+ )
+
+ def test_export(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.export()
+ client.api.export.assert_called_with(
+ FAKE_CONTAINER_ID, DEFAULT_DATA_CHUNK_SIZE
+ )
+
+ def test_get_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.get_archive('foo')
+ client.api.get_archive.assert_called_with(
+ FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
+ )
+
+ def test_image(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.image.id == FAKE_IMAGE_ID
+
+ def test_kill(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.kill(signal=5)
+ client.api.kill.assert_called_with(FAKE_CONTAINER_ID, signal=5)
+
+ def test_labels(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.labels == {'foo': 'bar'}
+
+ def test_logs(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.logs()
+ client.api.logs.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_pause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.pause()
+ client.api.pause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_put_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.put_archive('path', 'foo')
+ client.api.put_archive.assert_called_with(FAKE_CONTAINER_ID,
+ 'path', 'foo')
+
+ def test_remove(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.remove()
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_rename(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.rename("foo")
+ client.api.rename.assert_called_with(FAKE_CONTAINER_ID, "foo")
+
+ def test_resize(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.resize(1, 2)
+ client.api.resize.assert_called_with(FAKE_CONTAINER_ID, 1, 2)
+
+ def test_restart(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.restart()
+ client.api.restart.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_start(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.start()
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stats(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stats()
+ client.api.stats.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stop(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stop()
+ client.api.stop.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_top(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.top()
+ client.api.top.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_unpause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.unpause()
+ client.api.unpause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_update(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.update(cpu_shares=2)
+ client.api.update_container.assert_called_with(FAKE_CONTAINER_ID,
+ cpu_shares=2)
+
+ def test_wait(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.wait()
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
new file mode 100644
index 0000000..6783279
--- /dev/null
+++ b/tests/unit/models_images_test.py
@@ -0,0 +1,128 @@
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_IMAGE_ID
+from .fake_api_client import make_fake_client
+
+
+class ImageCollectionTest(unittest.TestCase):
+ def test_build(self):
+ client = make_fake_client()
+ image = client.images.build()
+ client.api.build.assert_called_with()
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_get(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_labels(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ assert image.labels == {'bar': 'foo'}
+
+ def test_list(self):
+ client = make_fake_client()
+ images = client.images.list(all=True)
+ client.api.images.assert_called_with(all=True, name=None, filters=None)
+ assert len(images) == 1
+ assert isinstance(images[0], Image)
+ assert images[0].id == FAKE_IMAGE_ID
+
+ def test_load(self):
+ client = make_fake_client()
+ client.images.load('byte stream')
+ client.api.load_image.assert_called_with('byte stream')
+
+ def test_pull(self):
+ client = make_fake_client()
+ image = client.images.pull('test_image:latest')
+ client.api.pull.assert_called_with('test_image', tag='latest')
+ client.api.inspect_image.assert_called_with('test_image:latest')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_pull_multiple(self):
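+ # Pulling without an explicit tag fetches every tag of the
+ # repository, so the model layer lists the matching images
+ # afterwards instead of inspecting a single one.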
+ client = make_fake_client()
+ images = client.images.pull('test_image')
+ client.api.pull.assert_called_with('test_image', tag=None)
+ client.api.images.assert_called_with(
+ all=False, name='test_image', filters=None
+ )
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert len(images) == 1
+ image = images[0]
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_push(self):
+ client = make_fake_client()
+ client.images.push('foobar', insecure_registry=True)
+ client.api.push.assert_called_with(
+ 'foobar',
+ tag=None,
+ insecure_registry=True
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ client.images.remove('test_image')
+ client.api.remove_image.assert_called_with('test_image')
+
+ def test_search(self):
+ client = make_fake_client()
+ client.images.search('test')
+ client.api.search.assert_called_with('test')
+
+
+class ImageTest(unittest.TestCase):
+ def test_short_id(self):
+ image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'sha256:b684607067'
+
+ image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'b684607067'
+
+ def test_tags(self):
+ image = Image(attrs={
+ 'RepoTags': ['test_image:latest']
+ })
+ assert image.tags == ['test_image:latest']
+
+ image = Image(attrs={
+ 'RepoTags': ['<none>:<none>']
+ })
+ assert image.tags == []
+
+ image = Image(attrs={
+ 'RepoTags': None
+ })
+ assert image.tags == []
+
+ def test_history(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.history()
+ client.api.history.assert_called_with(FAKE_IMAGE_ID)
+
+ def test_save(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.save()
+ client.api.get_image.assert_called_with(
+ FAKE_IMAGE_ID, DEFAULT_DATA_CHUNK_SIZE
+ )
+
+ def test_tag(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.tag('foo')
+ client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None)
diff --git a/tests/unit/models_networks_test.py b/tests/unit/models_networks_test.py
new file mode 100644
index 0000000..58c9fce
--- /dev/null
+++ b/tests/unit/models_networks_test.py
@@ -0,0 +1,64 @@
+import unittest
+
+from .fake_api import FAKE_NETWORK_ID, FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class NetworkCollectionTest(unittest.TestCase):
+
+ def test_create(self):
+ client = make_fake_client()
+ network = client.networks.create("foobar", labels={'foo': 'bar'})
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+ client.api.create_network.assert_called_once_with(
+ "foobar",
+ labels={'foo': 'bar'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ networks = client.networks.list()
+ assert networks[0].id == FAKE_NETWORK_ID
+ client.api.networks.assert_called_once_with()
+
+ client = make_fake_client()
+ client.networks.list(ids=["abc"])
+ client.api.networks.assert_called_once_with(ids=["abc"])
+
+ client = make_fake_client()
+ client.networks.list(names=["foobar"])
+ client.api.networks.assert_called_once_with(names=["foobar"])
+
+
+class NetworkTest(unittest.TestCase):
+
+ def test_connect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.connect(FAKE_CONTAINER_ID)
+ client.api.connect_container_to_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_disconnect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.disconnect(FAKE_CONTAINER_ID)
+ client.api.disconnect_container_from_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.remove()
+ client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID)
diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py
new file mode 100644
index 0000000..5af24ee
--- /dev/null
+++ b/tests/unit/models_resources_test.py
@@ -0,0 +1,28 @@
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class ModelTest(unittest.TestCase):
+ def test_reload(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attrs['Name'] = "oldname"
+ container.reload()
+ assert client.api.inspect_container.call_count == 2
+ assert container.attrs['Name'] == "foobar"
+
+ def test_hash(self):
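+ # Model hashes combine the model class with the object id, so a
+ # Container and an Image sharing an id still hash differently.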
+ client = make_fake_client()
+ container1 = client.containers.get(FAKE_CONTAINER_ID)
+ my_set = set([container1])
+ assert len(my_set) == 1
+
+ container2 = client.containers.get(FAKE_CONTAINER_ID)
+ my_set.add(container2)
+ assert len(my_set) == 1
+
+ image1 = client.images.get(FAKE_CONTAINER_ID)
+ my_set.add(image1)
+ assert len(my_set) == 2
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
new file mode 100644
index 0000000..247bb4a
--- /dev/null
+++ b/tests/unit/models_services_test.py
@@ -0,0 +1,53 @@
+import unittest
+from docker.models.services import _get_create_service_kwargs
+
+
+class CreateServiceKwargsTest(unittest.TestCase):
+ def test_get_create_service_kwargs(self):
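+ # _get_create_service_kwargs splits the user-supplied kwargs
+ # between top-level service arguments and the fields nested
+ # inside the task template.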
+ kwargs = _get_create_service_kwargs('test', {
+ 'image': 'foo',
+ 'command': 'true',
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'hostname': 'test_host',
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'networks': ['somenet'],
+ 'endpoint_spec': {'blah': 'blah'},
+ 'container_labels': {'containerkey': 'containervalue'},
+ 'resources': {'foo': 'bar'},
+ 'restart_policy': {'restart': 'policy'},
+ 'log_driver': 'logdriver',
+ 'log_driver_options': {'foo': 'bar'},
+ 'args': ['some', 'args'],
+ 'env': {'FOO': 'bar'},
+ 'workdir': '/',
+ 'user': 'bob',
+ 'mounts': [{'some': 'mounts'}],
+ 'stop_grace_period': 5,
+ 'constraints': ['foo=bar'],
+ })
+
+ task_template = kwargs.pop('task_template')
+
+ assert kwargs == {
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'endpoint_spec': {'blah': 'blah'},
+ }
+ assert set(task_template.keys()) == set([
+ 'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
+ 'LogDriver', 'Networks'
+ ])
+ assert task_template['Placement'] == {'Constraints': ['foo=bar']}
+ assert task_template['LogDriver'] == {
+ 'Name': 'logdriver',
+ 'Options': {'foo': 'bar'}
+ }
+ assert task_template['Networks'] == [{'Target': 'somenet'}]
+ assert set(task_template['ContainerSpec'].keys()) == set([
+ 'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
+ 'Labels', 'Mounts', 'StopGracePeriod'
+ ])
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
new file mode 100644
index 0000000..73b7336
--- /dev/null
+++ b/tests/unit/ssladapter_test.py
@@ -0,0 +1,78 @@
+import unittest
+from docker.transport import ssladapter
+import pytest
+
+try:
+ from backports.ssl_match_hostname import (
+ match_hostname, CertificateError
+ )
+except ImportError:
+ from ssl import (
+ match_hostname, CertificateError
+ )
+
+try:
+ from ssl import OP_NO_SSLv3, OP_NO_SSLv2, OP_NO_TLSv1
+except ImportError:
+ OP_NO_SSLv2 = 0x1000000
+ OP_NO_SSLv3 = 0x2000000
+ OP_NO_TLSv1 = 0x4000000
+
+
+class SSLAdapterTest(unittest.TestCase):
+ def test_only_uses_tls(self):
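+ # The urllib3 context used by the adapter must refuse SSLv2 and
+ # SSLv3 while still allowing TLSv1.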
+ ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
+
+ assert ssl_context.options & OP_NO_SSLv3
+ # if OpenSSL is compiled without SSL2 support, OP_NO_SSLv2 will be 0
+ assert not bool(OP_NO_SSLv2) or ssl_context.options & OP_NO_SSLv2
+ assert not ssl_context.options & OP_NO_TLSv1
+
+
+class MatchHostnameTest(unittest.TestCase):
+ cert = {
+ 'issuer': (
+ (('countryName', u'US'),),
+ (('stateOrProvinceName', u'California'),),
+ (('localityName', u'San Francisco'),),
+ (('organizationName', u'Docker Inc'),),
+ (('organizationalUnitName', u'Docker-Python'),),
+ (('commonName', u'localhost'),),
+ (('emailAddress', u'info@docker.com'),)
+ ),
+ 'notAfter': 'Mar 25 23:08:23 2030 GMT',
+ 'notBefore': u'Mar 25 23:08:23 2016 GMT',
+ 'serialNumber': u'BD5F894C839C548F',
+ 'subject': (
+ (('countryName', u'US'),),
+ (('stateOrProvinceName', u'California'),),
+ (('localityName', u'San Francisco'),),
+ (('organizationName', u'Docker Inc'),),
+ (('organizationalUnitName', u'Docker-Python'),),
+ (('commonName', u'localhost'),),
+ (('emailAddress', u'info@docker.com'),)
+ ),
+ 'subjectAltName': (
+ ('DNS', u'localhost'),
+ ('DNS', u'*.gensokyo.jp'),
+ ('IP Address', u'127.0.0.1'),
+ ),
+ 'version': 3
+ }
+
+ def test_match_ip_address_success(self):
+ assert match_hostname(self.cert, '127.0.0.1') is None
+
+ def test_match_localhost_success(self):
+ assert match_hostname(self.cert, 'localhost') is None
+
+ def test_match_dns_success(self):
+ assert match_hostname(self.cert, 'touhou.gensokyo.jp') is None
+
+ def test_match_ip_address_failure(self):
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, '192.168.0.25')
+
+ def test_match_dns_failure(self):
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, 'foobar.co.uk')
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
new file mode 100644
index 0000000..4385380
--- /dev/null
+++ b/tests/unit/swarm_test.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+import json
+
+from . import fake_api
+from ..helpers import requires_api_version
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
+
+
+class SwarmTest(BaseAPIClientTest):
+ @requires_api_version('1.24')
+ def test_node_update(self):
+ node_spec = {
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+
+ self.client.update_node(
+ node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec
+ )
+ args = fake_request.call_args
+ assert args[0][1] == (
+ url_prefix + 'nodes/24ifsmvkjbyhk/update?version=1'
+ )
+ assert json.loads(args[1]['data']) == node_spec
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+
+ @requires_api_version('1.24')
+ def test_join_swarm(self):
+ remote_addr = ['1.2.3.4:2377']
+ listen_addr = '2.3.4.5:2377'
+ join_token = 'A_BEAUTIFUL_JOIN_TOKEN'
+
+ data = {
+ 'RemoteAddrs': remote_addr,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token
+ }
+
+ self.client.join_swarm(
+ remote_addrs=remote_addr,
+ listen_addr=listen_addr,
+ join_token=join_token
+ )
+
+ args = fake_request.call_args
+
+ assert args[0][1] == url_prefix + 'swarm/join'
+ assert json.loads(args[1]['data']) == data
+ assert args[1]['headers']['Content-Type'] == 'application/json'
+
+ @requires_api_version('1.24')
+ def test_join_swarm_no_listen_address_takes_default(self):
+ remote_addr = ['1.2.3.4:2377']
+ join_token = 'A_BEAUTIFUL_JOIN_TOKEN'
+
+ data = {
+ 'RemoteAddrs': remote_addr,
+ 'ListenAddr': '0.0.0.0:2377',
+ 'JoinToken': join_token
+ }
+
+ self.client.join_swarm(remote_addrs=remote_addr, join_token=join_token)
+
+ args = fake_request.call_args
+
+ assert args[0][1] == url_prefix + 'swarm/join'
+ assert json.loads(args[1]['data']) == data
+ assert args[1]['headers']['Content-Type'] == 'application/json'
diff --git a/tests/unit/testdata/certs/ca.pem b/tests/unit/testdata/certs/ca.pem
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/unit/testdata/certs/ca.pem
diff --git a/tests/unit/testdata/certs/cert.pem b/tests/unit/testdata/certs/cert.pem
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/unit/testdata/certs/cert.pem
diff --git a/tests/unit/testdata/certs/key.pem b/tests/unit/testdata/certs/key.pem
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/unit/testdata/certs/key.pem
diff --git a/tests/unit/utils_build_test.py b/tests/unit/utils_build_test.py
new file mode 100644
index 0000000..012f15b
--- /dev/null
+++ b/tests/unit/utils_build_test.py
@@ -0,0 +1,493 @@
+# -*- coding: utf-8 -*-
+
+import os
+import os.path
+import shutil
+import socket
+import tarfile
+import tempfile
+import unittest
+
+
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.utils import exclude_paths, tar
+
+import pytest
+
+from ..helpers import make_tree
+
+
+def convert_paths(collection):
+ return set(map(convert_path, collection))
+
+
+def convert_path(path):
+ return path.replace('/', os.path.sep)
+
+
+class ExcludePathsTest(unittest.TestCase):
+ dirs = [
+ 'foo',
+ 'foo/bar',
+ 'bar',
+ 'target',
+ 'target/subdir',
+ 'subdir',
+ 'subdir/target',
+ 'subdir/target/subdir',
+ 'subdir/subdir2',
+ 'subdir/subdir2/target',
+ 'subdir/subdir2/target/subdir'
+ ]
+
+ files = [
+ 'Dockerfile',
+ 'Dockerfile.alt',
+ '.dockerignore',
+ 'a.py',
+ 'a.go',
+ 'b.py',
+ 'cde.py',
+ 'foo/a.py',
+ 'foo/b.py',
+ 'foo/bar/a.py',
+ 'bar/a.py',
+ 'foo/Dockerfile3',
+ 'target/file.txt',
+ 'target/subdir/file.txt',
+ 'subdir/file.txt',
+ 'subdir/target/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/file.txt',
+ 'subdir/subdir2/target/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
+ ]
+
+ all_paths = set(dirs + files)
+
+ def setUp(self):
+ self.base = make_tree(self.dirs, self.files)
+
+ def tearDown(self):
+ shutil.rmtree(self.base)
+
+ def exclude(self, patterns, dockerfile=None):
+ return set(exclude_paths(self.base, patterns, dockerfile=dockerfile))
+
+ def test_no_excludes(self):
+ assert self.exclude(['']) == convert_paths(self.all_paths)
+
+ def test_no_dupes(self):
+ paths = exclude_paths(self.base, ['!a.py'])
+ assert sorted(paths) == sorted(set(paths))
+
+ def test_wildcard_exclude(self):
+ assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
+
+ def test_exclude_dockerfile_dockerignore(self):
+ """
+ Even if the .dockerignore file explicitly says to exclude
+ Dockerfile and/or .dockerignore, don't exclude them from
+ the actual tar file.
+ """
+ assert self.exclude(['Dockerfile', '.dockerignore']) == convert_paths(
+ self.all_paths
+ )
+
+ def test_exclude_custom_dockerfile(self):
+ """
+ If we're using a custom Dockerfile, make sure that's not
+ excluded.
+ """
+ assert self.exclude(['*'], dockerfile='Dockerfile.alt') == set(
+ ['Dockerfile.alt', '.dockerignore']
+ )
+
+ assert self.exclude(
+ ['*'], dockerfile='foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+
+ # https://github.com/docker/docker-py/issues/1956
+ assert self.exclude(
+ ['*'], dockerfile='./foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+
+ def test_exclude_dockerfile_child(self):
+ includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
+ assert convert_path('foo/Dockerfile3') in includes
+ assert convert_path('foo/a.py') not in includes
+
+ def test_single_filename(self):
+ assert self.exclude(['a.py']) == convert_paths(
+ self.all_paths - set(['a.py'])
+ )
+
+ def test_single_filename_leading_dot_slash(self):
+ assert self.exclude(['./a.py']) == convert_paths(
+ self.all_paths - set(['a.py'])
+ )
+
+ # As odd as it sounds, a filename pattern with a trailing slash
+ # *will* result in that file being excluded.
+ def test_single_filename_trailing_slash(self):
+ assert self.exclude(['a.py/']) == convert_paths(
+ self.all_paths - set(['a.py'])
+ )
+
+ def test_wildcard_filename_start(self):
+ assert self.exclude(['*.py']) == convert_paths(
+ self.all_paths - set(['a.py', 'b.py', 'cde.py'])
+ )
+
+ def test_wildcard_with_exception(self):
+ assert self.exclude(['*.py', '!b.py']) == convert_paths(
+ self.all_paths - set(['a.py', 'cde.py'])
+ )
+
+ def test_wildcard_with_wildcard_exception(self):
+ assert self.exclude(['*.*', '!*.go']) == convert_paths(
+ self.all_paths - set([
+ 'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
+ ])
+ )
+
+ def test_wildcard_filename_end(self):
+ assert self.exclude(['a.*']) == convert_paths(
+ self.all_paths - set(['a.py', 'a.go'])
+ )
+
+ def test_question_mark(self):
+ assert self.exclude(['?.py']) == convert_paths(
+ self.all_paths - set(['a.py', 'b.py'])
+ )
+
+ def test_single_subdir_single_filename(self):
+ assert self.exclude(['foo/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_single_subdir_single_filename_leading_slash(self):
+ assert self.exclude(['/foo/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_exclude_include_absolute_path(self):
+ base = make_tree([], ['a.py', 'b.py'])
+ assert exclude_paths(
+ base,
+ ['/*', '!/*.py']
+ ) == set(['a.py', 'b.py'])
+
+ def test_single_subdir_with_path_traversal(self):
+ assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_single_subdir_wildcard_filename(self):
+ assert self.exclude(['foo/*.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py', 'foo/b.py'])
+ )
+
+ def test_wildcard_subdir_single_filename(self):
+ assert self.exclude(['*/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py', 'bar/a.py'])
+ )
+
+ def test_wildcard_subdir_wildcard_filename(self):
+ assert self.exclude(['*/*.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py', 'foo/b.py', 'bar/a.py'])
+ )
+
+ def test_directory(self):
+ assert self.exclude(['foo']) == convert_paths(
+ self.all_paths - set([
+ 'foo', 'foo/a.py', 'foo/b.py', 'foo/bar', 'foo/bar/a.py',
+ 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_directory_with_trailing_slash(self):
+ assert self.exclude(['foo/']) == convert_paths(
+ self.all_paths - set([
+ 'foo', 'foo/a.py', 'foo/b.py',
+ 'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_directory_with_single_exception(self):
+ assert self.exclude(['foo', '!foo/bar/a.py']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/bar',
+ 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_directory_with_subdir_exception(self):
+ assert self.exclude(['foo', '!foo/bar']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_directory_with_subdir_exception_win32_pathsep(self):
+ assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_directory_with_wildcard_exception(self):
+ assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
+ self.all_paths - set([
+ 'foo/bar', 'foo/bar/a.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_subdirectory(self):
+ assert self.exclude(['foo/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_subdirectory_win32_pathsep(self):
+ assert self.exclude(['foo\\bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_double_wildcard(self):
+ assert self.exclude(['**/a.py']) == convert_paths(
+ self.all_paths - set(
+ ['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
+ )
+ )
+
+ assert self.exclude(['foo/**/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_single_and_double_wildcard(self):
+ assert self.exclude(['**/target/*/*']) == convert_paths(
+ self.all_paths - set(
+ ['target/subdir/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt']
+ )
+ )
+
+ def test_trailing_double_wildcard(self):
+ assert self.exclude(['subdir/**']) == convert_paths(
+ self.all_paths - set(
+ ['subdir/file.txt',
+ 'subdir/target/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/file.txt',
+ 'subdir/subdir2/target/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
+ 'subdir/target',
+ 'subdir/target/subdir',
+ 'subdir/subdir2',
+ 'subdir/subdir2/target',
+ 'subdir/subdir2/target/subdir']
+ )
+ )
+
+ def test_double_wildcard_with_exception(self):
+ assert self.exclude(['**', '!bar', '!foo/bar']) == convert_paths(
+ set([
+ 'foo/bar', 'foo/bar/a.py', 'bar', 'bar/a.py', 'Dockerfile',
+ '.dockerignore',
+ ])
+ )
+
+ def test_include_wildcard(self):
+ # This may be surprising, but it matches the CLI's behavior
+ # (tested with 18.05.0-ce on Linux)
+ base = make_tree(['a'], ['a/b.py'])
+ assert exclude_paths(
+ base,
+ ['*', '!*/b.py']
+ ) == set()
+
+ def test_last_line_precedence(self):
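+ # Later patterns take precedence: the final README-secret.md line
+ # re-excludes a file that the preceding !README*.md exception had
+ # just re-included.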
+ base = make_tree(
+ [],
+ ['garbage.md',
+ 'trash.md',
+ 'README.md',
+ 'README-bis.md',
+ 'README-secret.md'])
+ assert exclude_paths(
+ base,
+ ['*.md', '!README*.md', 'README-secret.md']
+ ) == set(['README.md', 'README-bis.md'])
+
+ def test_parent_directory(self):
+ base = make_tree(
+ [],
+ ['a.py',
+ 'b.py',
+ 'c.py'])
+ # The .dockerignore reference stipulates that absolute paths are
+ # equivalent to relative paths, hence /../foo should be
+ # equivalent to ../foo. It also stipulates that paths are run
+ # through Go's filepath.Clean, which explicitly replaces "/.."
+ # with "/" at the beginning of a path.
+ assert exclude_paths(
+ base,
+ ['../a.py', '/../b.py']
+ ) == set(['c.py'])
+
+
+class TarTest(unittest.TestCase):
+ def test_tar_with_excludes(self):
+ dirs = [
+ 'foo',
+ 'foo/bar',
+ 'bar',
+ ]
+
+ files = [
+ 'Dockerfile',
+ 'Dockerfile.alt',
+ '.dockerignore',
+ 'a.py',
+ 'a.go',
+ 'b.py',
+ 'cde.py',
+ 'foo/a.py',
+ 'foo/b.py',
+ 'foo/bar/a.py',
+ 'bar/a.py',
+ ]
+
+ exclude = [
+ '*.py',
+ '!b.py',
+ '!a.go',
+ 'foo',
+ 'Dockerfile*',
+ '.dockerignore',
+ ]
+
+ expected_names = set([
+ 'Dockerfile',
+ '.dockerignore',
+ 'a.go',
+ 'b.py',
+ 'bar',
+ 'bar/a.py',
+ ])
+
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+
+ with tar(base, exclude=exclude) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == sorted(expected_names)
+
+ def test_tar_with_empty_directory(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM or os.geteuid() == 0,
+ reason='root user always has access; no chmod on Windows'
+ )
+ def test_tar_with_inaccessible_file(self):
+ base = tempfile.mkdtemp()
+ full_path = os.path.join(base, 'foo')
+ self.addCleanup(shutil.rmtree, base)
+ with open(full_path, 'w') as f:
+ f.write('content')
+ os.chmod(full_path, 0o222)
+ with pytest.raises(IOError) as ei:
+ tar(base)
+
+ assert 'Can not read file in context: {}'.format(full_path) in (
+ ei.exconly()
+ )
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_file_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ with open(os.path.join(base, 'foo'), 'w') as f:
+ f.write("content")
+ os.makedirs(os.path.join(base, 'bar'))
+ os.symlink('../foo', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_directory_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ os.symlink('../foo', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_broken_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+
+ os.symlink('../baz', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32')
+ def test_tar_socket_file(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ sock = socket.socket(socket.AF_UNIX)
+ self.addCleanup(sock.close)
+ sock.bind(os.path.join(base, 'test.sock'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+ def test_tar_negative_mtime_bug(self):
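+ # Filesystems can report negative mtimes; the tar helper should
+ # preserve them instead of failing.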
+ base = tempfile.mkdtemp()
+ filename = os.path.join(base, 'th.txt')
+ self.addCleanup(shutil.rmtree, base)
+ with open(filename, 'w') as f:
+ f.write('Invisible Full Moon')
+ os.utime(filename, (12345, -3600.0))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert tar_data.getnames() == ['th.txt']
+ assert tar_data.getmember('th.txt').mtime == -3600
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_directory_link(self):
+ dirs = ['a', 'b', 'a/c']
+ files = ['a/hello.py', 'b/utils.py', 'a/c/descend.py']
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+ os.symlink(os.path.join(base, 'b'), os.path.join(base, 'a/c/b'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ names = tar_data.getnames()
+ for member in dirs + files:
+ assert member in names
+ assert 'a/c/b' in names
+ assert 'a/c/b/utils.py' not in names
diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py
new file mode 100644
index 0000000..50ba383
--- /dev/null
+++ b/tests/unit/utils_config_test.py
@@ -0,0 +1,123 @@
+import os
+import unittest
+import shutil
+import tempfile
+import json
+
+from py.test import ensuretemp
+from pytest import mark
+from docker.utils import config
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class FindConfigFileTest(unittest.TestCase):
+ def tmpdir(self, name):
+ tmpdir = ensuretemp(name)
+ self.addCleanup(tmpdir.remove)
+ return tmpdir
+
+ def test_find_config_fallback(self):
+ tmpdir = self.tmpdir('test_find_config_fallback')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() is None
+
+ def test_find_config_from_explicit_path(self):
+ tmpdir = self.tmpdir('test_find_config_from_explicit_path')
+ config_path = tmpdir.ensure('my-config-file.json')
+
+ assert config.find_config_file(str(config_path)) == str(config_path)
+
+ def test_find_config_from_environment(self):
+ tmpdir = self.tmpdir('test_find_config_from_environment')
+ config_path = tmpdir.ensure('config.json')
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_posix(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_posix')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_legacy_name(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
+ config_path = tmpdir.ensure('.dockercfg')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform != 'win32'")
+ def test_find_config_from_home_windows(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_windows')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+
+class LoadConfigTest(unittest.TestCase):
+ def test_load_config_no_file(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg = config.load_general_config(folder)
+ assert cfg is not None
+ assert isinstance(cfg, dict)
+ assert not cfg
+
+ def test_load_config_custom_headers(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'HttpHeaders': {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ },
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert 'HttpHeaders' in cfg
+ assert cfg['HttpHeaders'] == {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ }
+
+ def test_load_config_detach_keys(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert cfg == config_data
+
+ def test_load_config_from_env(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = config.load_general_config(None)
+ assert cfg == config_data
diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py
new file mode 100644
index 0000000..f7aefd0
--- /dev/null
+++ b/tests/unit/utils_json_stream_test.py
@@ -0,0 +1,62 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
+
+
+class TestJsonSplitter(object):
+
+ def test_json_splitter_no_object(self):
+ data = '{"foo": "bar'
+ assert json_splitter(data) is None
+
+ def test_json_splitter_with_object(self):
+ data = '{"foo": "bar"}\n \n{"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
+class TestStreamAsText(object):
+
+ def test_stream_with_non_utf_unicode_character(self):
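+ # Bytes that are not valid UTF-8 should decode to replacement
+ # characters rather than raising UnicodeDecodeError.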
+ stream = [b'\xed\xf3\xf3']
+ output, = stream_as_text(stream)
+ assert output == '���'
+
+ def test_stream_with_utf_character(self):
+ stream = ['ěĝ'.encode('utf-8')]
+ output, = stream_as_text(stream)
+ assert output == 'ěĝ'
+
+
+class TestJsonStream(object):
+
+ def test_with_falsy_entries(self):
+ stream = [
+ '{"one": "two"}\n{}\n',
+ "[1, 2, 3]\n[]\n",
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {},
+ [1, 2, 3],
+ [],
+ ]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
new file mode 100644
index 0000000..8880cfe
--- /dev/null
+++ b/tests/unit/utils_test.py
@@ -0,0 +1,619 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import json
+import os
+import os.path
+import shutil
+import sys
+import tempfile
+import unittest
+
+
+from docker.api.client import APIClient
+from docker.errors import DockerException
+from docker.utils import (
+ convert_filters, convert_volume_binds, decode_json_header, kwargs_from_env,
+ parse_bytes, parse_devices, parse_env_file, parse_host,
+ parse_repository_tag, split_command, update_headers,
+)
+
+from docker.utils.ports import build_port_bindings, split_port
+from docker.utils.utils import format_environment
+
+import pytest
+
+import six
+
+TEST_CERT_DIR = os.path.join(
+ os.path.dirname(__file__),
+ 'testdata/certs',
+)
+
+
+class DecoratorsTest(unittest.TestCase):
+ def test_update_headers(self):
+ sample_headers = {
+ 'X-Docker-Locale': 'en-US',
+ }
+
+ def f(self, headers=None):
+ return headers
+
+ client = APIClient()
+ client._general_configs = {}
+
+ g = update_headers(f)
+ assert g(client, headers=None) is None
+ assert g(client, headers={}) == {}
+ assert g(client, headers={'Content-type': 'application/json'}) == {
+ 'Content-type': 'application/json',
+ }
+
+ client._general_configs = {
+ 'HttpHeaders': sample_headers
+ }
+
+ assert g(client, headers=None) == sample_headers
+ assert g(client, headers={}) == sample_headers
+ assert g(client, headers={'Content-type': 'application/json'}) == {
+ 'Content-type': 'application/json',
+ 'X-Docker-Locale': 'en-US',
+ }
+
+
+class KwargsFromEnvTest(unittest.TestCase):
+ def setUp(self):
+ self.os_environ = os.environ.copy()
+
+ def tearDown(self):
+ os.environ = self.os_environ
+
+ def test_kwargs_from_env_empty(self):
+ os.environ.update(DOCKER_HOST='',
+ DOCKER_CERT_PATH='')
+ os.environ.pop('DOCKER_TLS_VERIFY', None)
+
+ kwargs = kwargs_from_env()
+ assert kwargs.get('base_url') is None
+ assert kwargs.get('tls') is None
+
+ def test_kwargs_from_env_tls(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='1')
+ kwargs = kwargs_from_env(assert_hostname=False)
+ assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is False
+ assert kwargs['tls'].verify
+ try:
+ client = APIClient(**kwargs)
+ assert kwargs['base_url'] == client.base_url
+ assert kwargs['tls'].ca_cert == client.verify
+ assert kwargs['tls'].cert == client.cert
+ except TypeError as e:
+ self.fail(e)
+
+ def test_kwargs_from_env_tls_verify_false(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='')
+ kwargs = kwargs_from_env(assert_hostname=True)
+ assert 'https://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is True
+ assert kwargs['tls'].verify is False
+ try:
+ client = APIClient(**kwargs)
+ assert kwargs['base_url'] == client.base_url
+ assert kwargs['tls'].cert == client.cert
+ assert not kwargs['tls'].verify
+ except TypeError as e:
+ self.fail(e)
+
+ def test_kwargs_from_env_tls_verify_false_no_cert(self):
+ temp_dir = tempfile.mkdtemp()
+ cert_dir = os.path.join(temp_dir, '.docker')
+ shutil.copytree(TEST_CERT_DIR, cert_dir)
+
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ HOME=temp_dir,
+ DOCKER_TLS_VERIFY='')
+ os.environ.pop('DOCKER_CERT_PATH', None)
+ kwargs = kwargs_from_env(assert_hostname=True)
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
+
+ def test_kwargs_from_env_no_cert_path(self):
+ try:
+ temp_dir = tempfile.mkdtemp()
+ cert_dir = os.path.join(temp_dir, '.docker')
+ shutil.copytree(TEST_CERT_DIR, cert_dir)
+
+ os.environ.update(HOME=temp_dir,
+ DOCKER_CERT_PATH='',
+ DOCKER_TLS_VERIFY='1')
+
+ kwargs = kwargs_from_env()
+ assert kwargs['tls'].verify
+ assert cert_dir in kwargs['tls'].ca_cert
+ assert cert_dir in kwargs['tls'].cert[0]
+ assert cert_dir in kwargs['tls'].cert[1]
+ finally:
+ if temp_dir:
+ shutil.rmtree(temp_dir)
+
+ def test_kwargs_from_env_alternate_env(self):
+ # Values in os.environ are entirely ignored if an alternate is
+ # provided
+ os.environ.update(
+ DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY=''
+ )
+ kwargs = kwargs_from_env(environment={
+ 'DOCKER_HOST': 'http://docker.gensokyo.jp:2581',
+ })
+ assert 'http://docker.gensokyo.jp:2581' == kwargs['base_url']
+ assert 'tls' not in kwargs
+
+
+class ConvertVolumeBindsTest(unittest.TestCase):
+ def test_convert_volume_binds_empty(self):
+ assert convert_volume_binds({}) == []
+ assert convert_volume_binds([]) == []
+
+ def test_convert_volume_binds_list(self):
+ data = ['/a:/a:ro', '/b:/c:z']
+ assert convert_volume_binds(data) == data
+
+ def test_convert_volume_binds_complete(self):
+ data = {
+ '/mnt/vol1': {
+ 'bind': '/data',
+ 'mode': 'ro'
+ }
+ }
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:ro']
+
+ def test_convert_volume_binds_compact(self):
+ data = {
+ '/mnt/vol1': '/data'
+ }
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
+
+ def test_convert_volume_binds_no_mode(self):
+ data = {
+ '/mnt/vol1': {
+ 'bind': '/data'
+ }
+ }
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
+
+ def test_convert_volume_binds_unicode_bytes_input(self):
+ expected = [u'/mnt/지연:/unicode/박:rw']
+
+ data = {
+ u'/mnt/지연'.encode('utf-8'): {
+ 'bind': u'/unicode/박'.encode('utf-8'),
+ 'mode': 'rw'
+ }
+ }
+ assert convert_volume_binds(data) == expected
+
+ def test_convert_volume_binds_unicode_unicode_input(self):
+ expected = [u'/mnt/지연:/unicode/박:rw']
+
+ data = {
+ u'/mnt/지연': {
+ 'bind': u'/unicode/박',
+ 'mode': 'rw'
+ }
+ }
+ assert convert_volume_binds(data) == expected
+
+
+class ParseEnvFileTest(unittest.TestCase):
+ def generate_tempfile(self, file_content=None):
+ """
+ Generates a temporary file for tests with the content
+ of 'file_content' and returns the filename.
+ Don't forget to unlink the file with os.unlink() after.
+ """
+ local_tempfile = tempfile.NamedTemporaryFile(delete=False)
+ local_tempfile.write(file_content.encode('UTF-8'))
+ local_tempfile.close()
+ return local_tempfile.name
+
+ def test_parse_env_file_proper(self):
+ env_file = self.generate_tempfile(
+ file_content='USER=jdoe\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_with_equals_character(self):
+ env_file = self.generate_tempfile(
+ file_content='USER=jdoe\nPASS=sec==ret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'sec==ret'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_commented_line(self):
+ env_file = self.generate_tempfile(
+ file_content='USER=jdoe\n#PASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_newline(self):
+ env_file = self.generate_tempfile(
+ file_content='\nUSER=jdoe\n\n\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_invalid_line(self):
+ env_file = self.generate_tempfile(
+ file_content='USER jdoe')
+ with pytest.raises(DockerException):
+ parse_env_file(env_file)
+ os.unlink(env_file)
+
+
+class ParseHostTest(unittest.TestCase):
+ def test_parse_host(self):
+ invalid_hosts = [
+ '0.0.0.0',
+ 'tcp://',
+ 'udp://127.0.0.1',
+ 'udp://127.0.0.1:2375',
+ ]
+
+ valid_hosts = {
+ '0.0.0.1:5555': 'http://0.0.0.1:5555',
+ ':6666': 'http://127.0.0.1:6666',
+ 'tcp://:7777': 'http://127.0.0.1:7777',
+ 'http://:7777': 'http://127.0.0.1:7777',
+ 'https://kokia.jp:2375': 'https://kokia.jp:2375',
+ 'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
+ 'unix://': 'http+unix://var/run/docker.sock',
+ '12.234.45.127:2375/docker/engine': (
+ 'http://12.234.45.127:2375/docker/engine'
+ ),
+ 'somehost.net:80/service/swarm': (
+ 'http://somehost.net:80/service/swarm'
+ ),
+ 'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
+ '[fd12::82d1]:2375': 'http://[fd12::82d1]:2375',
+ 'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090',
+ '[fd12::82d1]:2375/docker/engine': (
+ 'http://[fd12::82d1]:2375/docker/engine'
+ ),
+ }
+
+ for host in invalid_hosts:
+ with pytest.raises(DockerException):
+ parse_host(host, None)
+
+ for host, expected in valid_hosts.items():
+ assert parse_host(host, None) == expected
+
+ def test_parse_host_empty_value(self):
+ unix_socket = 'http+unix://var/run/docker.sock'
+ npipe = 'npipe:////./pipe/docker_engine'
+
+ for val in [None, '']:
+ assert parse_host(val, is_win32=False) == unix_socket
+ assert parse_host(val, is_win32=True) == npipe
+
+ def test_parse_host_tls(self):
+ host_value = 'myhost.docker.net:3348'
+ expected_result = 'https://myhost.docker.net:3348'
+ assert parse_host(host_value, tls=True) == expected_result
+
+ def test_parse_host_tls_tcp_proto(self):
+ host_value = 'tcp://myhost.docker.net:3348'
+ expected_result = 'https://myhost.docker.net:3348'
+ assert parse_host(host_value, tls=True) == expected_result
+
+ def test_parse_host_trailing_slash(self):
+ host_value = 'tcp://myhost.docker.net:2376/'
+ expected_result = 'http://myhost.docker.net:2376'
+ assert parse_host(host_value) == expected_result
+
+
+class ParseRepositoryTagTest(unittest.TestCase):
+ sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+
+ def test_index_image_no_tag(self):
+ assert parse_repository_tag("root") == ("root", None)
+
+ def test_index_image_tag(self):
+ assert parse_repository_tag("root:tag") == ("root", "tag")
+
+ def test_index_user_image_no_tag(self):
+ assert parse_repository_tag("user/repo") == ("user/repo", None)
+
+ def test_index_user_image_tag(self):
+ assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag")
+
+ def test_private_reg_image_no_tag(self):
+ assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", None)
+
+ def test_private_reg_image_tag(self):
+ assert parse_repository_tag("url:5000/repo:tag") == (
+ "url:5000/repo", "tag"
+ )
+
+ def test_index_image_sha(self):
+ assert parse_repository_tag("root@sha256:{0}".format(self.sha)) == (
+ "root", "sha256:{0}".format(self.sha)
+ )
+
+ def test_private_reg_image_sha(self):
+ assert parse_repository_tag(
+ "url:5000/repo@sha256:{0}".format(self.sha)
+ ) == ("url:5000/repo", "sha256:{0}".format(self.sha))
+
+
+class ParseDeviceTest(unittest.TestCase):
+ def test_dict(self):
+ devices = parse_devices([{
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'r'
+ }])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'r'
+ }
+
+ def test_partial_string_definition(self):
+ devices = parse_devices(['/dev/sda1'])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/sda1',
+ 'CgroupPermissions': 'rwm'
+ }
+
+ def test_permissionless_string_definition(self):
+ devices = parse_devices(['/dev/sda1:/dev/mnt1'])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'rwm'
+ }
+
+ def test_full_string_definition(self):
+ devices = parse_devices(['/dev/sda1:/dev/mnt1:r'])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'r'
+ }
+
+ def test_hybrid_list(self):
+ devices = parse_devices([
+ '/dev/sda1:/dev/mnt1:rw',
+ {
+ 'PathOnHost': '/dev/sda2',
+ 'PathInContainer': '/dev/mnt2',
+ 'CgroupPermissions': 'r'
+ }
+ ])
+
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'rw'
+ }
+ assert devices[1] == {
+ 'PathOnHost': '/dev/sda2',
+ 'PathInContainer': '/dev/mnt2',
+ 'CgroupPermissions': 'r'
+ }
+
+
+class ParseBytesTest(unittest.TestCase):
+ def test_parse_bytes_valid(self):
+ assert parse_bytes("512MB") == 536870912
+ assert parse_bytes("512M") == 536870912
+ assert parse_bytes("512m") == 536870912
+
+ def test_parse_bytes_invalid(self):
+ with pytest.raises(DockerException):
+ parse_bytes("512MK")
+ with pytest.raises(DockerException):
+ parse_bytes("512L")
+ with pytest.raises(DockerException):
+ parse_bytes("127.0.0.1K")
+
+ def test_parse_bytes_float(self):
+ with pytest.raises(DockerException):
+ parse_bytes("1.5k")
+
+ def test_parse_bytes_maxint(self):
+ assert parse_bytes("{0}k".format(sys.maxsize)) == sys.maxsize * 1024
+
+
+class UtilsTest(unittest.TestCase):
+ longMessage = True
+
+ def test_convert_filters(self):
+ tests = [
+ ({'dangling': True}, '{"dangling": ["true"]}'),
+ ({'dangling': "true"}, '{"dangling": ["true"]}'),
+ ({'exited': 0}, '{"exited": [0]}'),
+ ({'exited': [0, 1]}, '{"exited": [0, 1]}'),
+ ]
+
+ for filters, expected in tests:
+ assert convert_filters(filters) == expected
+
+ def test_decode_json_header(self):
+ obj = {'a': 'b', 'c': 1}
+ data = None
+ if six.PY3:
+ data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
+ else:
+ data = base64.urlsafe_b64encode(json.dumps(obj))
+ decoded_data = decode_json_header(data)
+ assert obj == decoded_data
+
+
+class SplitCommandTest(unittest.TestCase):
+ def test_split_command_with_unicode(self):
+ assert split_command(u'echo μμ') == ['echo', 'μμ']
+
+ @pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
+ def test_split_command_with_bytes(self):
+ assert split_command('echo μμ') == ['echo', 'μμ']
+
+
+class PortsTest(unittest.TestCase):
+ def test_split_port_with_host_ip(self):
+ internal_port, external_port = split_port("127.0.0.1:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", "1000")]
+
+ def test_split_port_with_protocol(self):
+ internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
+ assert internal_port == ["2000/udp"]
+ assert external_port == [("127.0.0.1", "1000")]
+
+ def test_split_port_with_host_ip_no_port(self):
+ internal_port, external_port = split_port("127.0.0.1::2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", None)]
+
+ def test_split_port_range_with_host_ip_no_port(self):
+ internal_port, external_port = split_port("127.0.0.1::2000-2001")
+ assert internal_port == ["2000", "2001"]
+ assert external_port == [("127.0.0.1", None), ("127.0.0.1", None)]
+
+ def test_split_port_with_host_port(self):
+ internal_port, external_port = split_port("1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == ["1000"]
+
+ def test_split_port_range_with_host_port(self):
+ internal_port, external_port = split_port("1000-1001:2000-2001")
+ assert internal_port == ["2000", "2001"]
+ assert external_port == ["1000", "1001"]
+
+ def test_split_port_random_port_range_with_host_port(self):
+ internal_port, external_port = split_port("1000-1001:2000")
+ assert internal_port == ["2000"]
+ assert external_port == ["1000-1001"]
+
+ def test_split_port_no_host_port(self):
+ internal_port, external_port = split_port("2000")
+ assert internal_port == ["2000"]
+ assert external_port is None
+
+ def test_split_port_range_no_host_port(self):
+ internal_port, external_port = split_port("2000-2001")
+ assert internal_port == ["2000", "2001"]
+ assert external_port is None
+
+ def test_split_port_range_with_protocol(self):
+ internal_port, external_port = split_port(
+ "127.0.0.1:1000-1001:2000-2001/udp")
+ assert internal_port == ["2000/udp", "2001/udp"]
+ assert external_port == [("127.0.0.1", "1000"), ("127.0.0.1", "1001")]
+
+ def test_split_port_with_ipv6_address(self):
+ internal_port, external_port = split_port(
+ "2001:abcd:ef00::2:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
+
+ def test_split_port_invalid(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000:tcp")
+
+ def test_non_matching_length_port_ranges(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000-1010:2000-2002/tcp")
+
+ def test_port_and_range_invalid(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000-2002/tcp")
+
+ def test_port_only_with_colon(self):
+ with pytest.raises(ValueError):
+ split_port(":80")
+
+ def test_host_only_with_colon(self):
+ with pytest.raises(ValueError):
+ split_port("localhost:")
+
+ def test_with_no_container_port(self):
+ with pytest.raises(ValueError):
+ split_port("localhost:80:")
+
+ def test_split_port_empty_string(self):
+ with pytest.raises(ValueError):
+ split_port("")
+
+ def test_split_port_non_string(self):
+ assert split_port(1243) == (['1243'], None)
+
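+ # build_port_bindings() folds a list of port specifications into a
+ # mapping from container port to its host bindings, merging entries
+ # that target the same container port.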
+ def test_build_port_bindings_with_one_port(self):
+ port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+
+ def test_build_port_bindings_with_matching_internal_ports(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
+
+ def test_build_port_bindings_with_nonmatching_internal_ports(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
+
+ def test_build_port_bindings_with_port_range(self):
+ port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["1001"] == [("127.0.0.1", "1001")]
+
+ def test_build_port_bindings_with_matching_internal_port_ranges(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
+ assert port_bindings["1001"] == [
+ ("127.0.0.1", "1001"), ("127.0.0.1", "2001")
+ ]
+
+ def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:2000-2001"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["1001"] == [("127.0.0.1", "1001")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
+ assert port_bindings["2001"] == [("127.0.0.1", "2001")]
+
+
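+# format_environment() renders an environment mapping as a list of
+# "KEY=value" strings, decoding bytes values as UTF-8; a None value yields a
+# bare "KEY" entry, while an empty string yields "KEY=".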
+class FormatEnvironmentTest(unittest.TestCase):
+ def test_format_env_binary_unicode_value(self):
+ env_dict = {
+ 'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'
+ }
+ assert format_environment(env_dict) == [u'ARTIST_NAME=송지은']
+
+ def test_format_env_no_value(self):
+ env_dict = {
+ 'FOO': None,
+ 'BAR': '',
+ }
+ assert sorted(format_environment(env_dict)) == ['BAR=', 'FOO']