author     Felipe Sateler <fsateler@debian.org>  2017-11-04 11:11:17 -0300
committer  Felipe Sateler <fsateler@debian.org>  2017-11-04 11:11:17 -0300
commit     0b182ac57a2fd2fd0278c3f3ff59ae6576c94dcb (patch)
tree       71c329967e1a03784da1bf3a897c6a6845894159
parent     084e894e112402c602da0120fbf6fed8c7d2774d (diff)
parent     1e3588cf8bb29eb2e7f9b852a6d5466ff35b9146 (diff)
Merge branch 'debian/experimental'
-rw-r--r--  MANIFEST.in  1
-rw-r--r--  PKG-INFO  101
-rw-r--r--  README.md  79
-rw-r--r--  README.rst  90
-rw-r--r--  debian/changelog  21
-rw-r--r--  debian/compat  2
-rw-r--r--  debian/control  17
-rw-r--r--  debian/copyright  1
-rw-r--r--  debian/patches/Do-not-require-pip-for-building.patch  32
-rw-r--r--  debian/patches/series  1
-rw-r--r--  docker.egg-info/PKG-INFO  110
-rw-r--r--  docker.egg-info/SOURCES.txt  122
-rw-r--r--  docker.egg-info/dependency_links.txt (renamed from docker_py.egg-info/dependency_links.txt)  0
-rw-r--r--  docker.egg-info/not-zip-safe (renamed from docker_py.egg-info/not-zip-safe)  0
-rw-r--r--  docker.egg-info/requires.txt (renamed from docker_py.egg-info/requires.txt)  2
-rw-r--r--  docker.egg-info/top_level.txt (renamed from docker_py.egg-info/top_level.txt)  0
-rw-r--r--  docker/__init__.py  7
-rw-r--r--  docker/api/__init__.py  10
-rw-r--r--  docker/api/build.py  158
-rw-r--r--  docker/api/client.py  455
-rw-r--r--  docker/api/container.py  984
-rw-r--r--  docker/api/daemon.py  109
-rw-r--r--  docker/api/exec_api.py  91
-rw-r--r--  docker/api/image.py  296
-rw-r--r--  docker/api/network.py  182
-rw-r--r--  docker/api/plugin.py  251
-rw-r--r--  docker/api/secret.py  91
-rw-r--r--  docker/api/service.py  276
-rw-r--r--  docker/api/swarm.py  254
-rw-r--r--  docker/api/volume.py  125
-rw-r--r--  docker/auth.py (renamed from docker/auth/auth.py)  68
-rw-r--r--  docker/auth/__init__.py  8
-rw-r--r--  docker/client.py  578
-rw-r--r--  docker/constants.py  5
-rw-r--r--  docker/errors.py  97
-rw-r--r--  docker/models/__init__.py  0
-rw-r--r--  docker/models/containers.py  949
-rw-r--r--  docker/models/images.py  290
-rw-r--r--  docker/models/networks.py  192
-rw-r--r--  docker/models/nodes.py  107
-rw-r--r--  docker/models/plugins.py  200
-rw-r--r--  docker/models/resource.py  93
-rw-r--r--  docker/models/secrets.py  69
-rw-r--r--  docker/models/services.py  273
-rw-r--r--  docker/models/swarm.py  147
-rw-r--r--  docker/models/volumes.py  99
-rw-r--r--  docker/ssladapter/__init__.py  1
-rw-r--r--  docker/tls.py  23
-rw-r--r--  docker/transport/__init__.py  3
-rw-r--r--  docker/transport/npipeconn.py  11
-rw-r--r--  docker/transport/npipesocket.py  1
-rw-r--r--  docker/transport/ssladapter.py (renamed from docker/ssladapter/ssladapter.py)  5
-rw-r--r--  docker/transport/unixconn.py  9
-rw-r--r--  docker/types/__init__.py  8
-rw-r--r--  docker/types/containers.py  584
-rw-r--r--  docker/types/healthcheck.py  63
-rw-r--r--  docker/types/networks.py  111
-rw-r--r--  docker/types/services.py  340
-rw-r--r--  docker/types/swarm.py  7
-rw-r--r--  docker/utils/__init__.py  14
-rw-r--r--  docker/utils/build.py  142
-rw-r--r--  docker/utils/decorators.py  31
-rw-r--r--  docker/utils/fnmatch.py  103
-rw-r--r--  docker/utils/json_stream.py  80
-rw-r--r--  docker/utils/ports.py  83
-rw-r--r--  docker/utils/ports/__init__.py  4
-rw-r--r--  docker/utils/ports/ports.py  92
-rw-r--r--  docker/utils/socket.py  10
-rw-r--r--  docker/utils/types.py  7
-rw-r--r--  docker/utils/utils.py  700
-rw-r--r--  docker/version.py  2
-rw-r--r--  docker_py.egg-info/PKG-INFO  61
-rw-r--r--  docker_py.egg-info/SOURCES.txt  82
-rw-r--r--  setup.cfg  1
-rw-r--r--  setup.py  35
-rw-r--r--  tests/base.py  48
-rw-r--r--  tests/helpers.py  152
-rw-r--r--  tests/integration/api_build_test.py (renamed from tests/integration/build_test.py)  143
-rw-r--r--  tests/integration/api_client_test.py (renamed from tests/integration/api_test.py)  81
-rw-r--r--  tests/integration/api_container_test.py (renamed from tests/integration/container_test.py)  237
-rw-r--r--  tests/integration/api_exec_test.py (renamed from tests/integration/exec_test.py)  36
-rw-r--r--  tests/integration/api_healthcheck_test.py  67
-rw-r--r--  tests/integration/api_image_test.py (renamed from tests/integration/image_test.py)  49
-rw-r--r--  tests/integration/api_network_test.py (renamed from tests/integration/network_test.py)  98
-rw-r--r--  tests/integration/api_plugin_test.py  145
-rw-r--r--  tests/integration/api_secret_test.py  69
-rw-r--r--  tests/integration/api_service_test.py  478
-rw-r--r--  tests/integration/api_swarm_test.py (renamed from tests/integration/swarm_test.py)  85
-rw-r--r--  tests/integration/api_volume_test.py (renamed from tests/integration/volume_test.py)  21
-rw-r--r--  tests/integration/base.py  114
-rw-r--r--  tests/integration/client_test.py  29
-rw-r--r--  tests/integration/conftest.py  6
-rw-r--r--  tests/integration/errors_test.py  14
-rw-r--r--  tests/integration/models_containers_test.py  260
-rw-r--r--  tests/integration/models_images_test.py  99
-rw-r--r--  tests/integration/models_networks_test.py  64
-rw-r--r--  tests/integration/models_nodes_test.py  37
-rw-r--r--  tests/integration/models_resources_test.py  16
-rw-r--r--  tests/integration/models_services_test.py  103
-rw-r--r--  tests/integration/models_swarm_test.py  33
-rw-r--r--  tests/integration/models_volumes_test.py  30
-rw-r--r--  tests/integration/regression_test.py  6
-rw-r--r--  tests/integration/service_test.py  189
-rw-r--r--  tests/integration/testdata/dummy-plugin/config.json  19
-rw-r--r--  tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt  0
-rw-r--r--  tests/unit/api_build_test.py (renamed from tests/unit/build_test.py)  9
-rw-r--r--  tests/unit/api_container_test.py (renamed from tests/unit/container_test.py)  163
-rw-r--r--  tests/unit/api_exec_test.py (renamed from tests/unit/exec_test.py)  4
-rw-r--r--  tests/unit/api_image_test.py (renamed from tests/unit/image_test.py)  18
-rw-r--r--  tests/unit/api_network_test.py (renamed from tests/unit/network_test.py)  40
-rw-r--r--  tests/unit/api_test.py  106
-rw-r--r--  tests/unit/api_volume_test.py (renamed from tests/unit/volume_test.py)  34
-rw-r--r--  tests/unit/auth_test.py  119
-rw-r--r--  tests/unit/client_test.py  137
-rw-r--r--  tests/unit/dockertypes_test.py  415
-rw-r--r--  tests/unit/errors_test.py  87
-rw-r--r--  tests/unit/fake_api.py  124
-rw-r--r--  tests/unit/fake_api_client.py  61
-rw-r--r--  tests/unit/models_containers_test.py  486
-rw-r--r--  tests/unit/models_images_test.py  112
-rw-r--r--  tests/unit/models_networks_test.py  64
-rw-r--r--  tests/unit/models_resources_test.py  28
-rw-r--r--  tests/unit/models_services_test.py  53
-rw-r--r--  tests/unit/ssladapter_test.py  12
-rw-r--r--  tests/unit/swarm_test.py  32
-rw-r--r--  tests/unit/utils_json_stream_test.py  62
-rw-r--r--  tests/unit/utils_test.py  418
127 files changed, 12340 insertions, 2628 deletions
diff --git a/MANIFEST.in b/MANIFEST.in
index ee6cdbb..41b3fa9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,3 +5,4 @@ include README.rst
include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
+recursive-include tests/integration/testdata *
diff --git a/PKG-INFO b/PKG-INFO
index cc96266..8dfa851 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,57 +1,106 @@
Metadata-Version: 1.1
-Name: docker-py
-Version: 1.10.6
-Summary: Python client for Docker.
-Home-page: https://github.com/docker/docker-py/
+Name: docker
+Version: 2.4.2
+Summary: A Python library for the Docker Engine API.
+Home-page: https://github.com/docker/docker-py
Author: Joffrey F
Author-email: joffrey@docker.com
License: UNKNOWN
-Description: docker-py
- =========
+Description: Docker SDK for Python
+ =====================
|Build Status|
- A Python library for the Docker Remote API. It does everything the
- ``docker`` command does, but from within Python – run containers, manage
- them, pull/push images, etc.
+ A Python library for the Docker Engine API. It lets you do anything the
+ ``docker`` command does, but from within Python apps – run containers,
+ manage containers, manage Swarms, etc.
Installation
------------
- The latest stable version is always available on PyPi.
+ The latest stable version `is available on
+ PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+ your ``requirements.txt`` file or install with pip:
::
- pip install docker-py
+ pip install docker
- Documentation
- -------------
+ Usage
+ -----
- |Documentation Status|
+ Connect to Docker using the default socket or the configuration in your
+ environment:
- `Read the full documentation
- here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
- available in the ``docs/`` directory.
+ .. code:: python
- License
- -------
+ import docker
+ client = docker.from_env()
- Docker is licensed under the Apache License, Version 2.0. See LICENSE
- for full license text
+ You can run containers:
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
+ .. code:: python
+
+ >>> client.containers.run("ubuntu", "echo hello world")
+ 'hello world\n'
+
+ You can run containers in the background:
+
+ .. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+ You can manage containers:
+
+ .. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+ You can stream logs:
+
+ .. code:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+ You can manage images:
+
+ .. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+ `Read the full documentation <https://docker-py.readthedocs.io>`__ to
+ see everything you can do.
+
+ .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
- .. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
- :target: https://readthedocs.org/projects/docker-py/?badge=latest
Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
diff --git a/README.md b/README.md
index 876ed02..38963b3 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,73 @@
-docker-py
-=========
+# Docker SDK for Python
-[![Build Status](https://travis-ci.org/docker/docker-py.png)](https://travis-ci.org/docker/docker-py)
+[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
-A Python library for the Docker Remote API. It does everything the `docker` command does, but from within Python – run containers, manage them, pull/push images, etc.
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
-Installation
-------------
+## Installation
-The latest stable version is always available on PyPi.
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
- pip install docker-py
+ pip install docker
-Documentation
--------------
+## Usage
-[![Documentation Status](https://readthedocs.org/projects/docker-py/badge/?version=latest)](https://readthedocs.org/projects/docker-py/?badge=latest)
+Connect to Docker using the default socket or the configuration in your environment:
-[Read the full documentation here](https://docker-py.readthedocs.io/en/latest/).
-The source is available in the `docs/` directory.
+```python
+import docker
+client = docker.from_env()
+```
+You can run containers:
-License
--------
-Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text
+```python
+>>> client.containers.run("ubuntu", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+... print line.strip()
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
diff --git a/README.rst b/README.rst
index 757b82c..78fc607 100644
--- a/README.rst
+++ b/README.rst
@@ -1,37 +1,87 @@
-docker-py
-=========
+Docker SDK for Python
+=====================
|Build Status|
-A Python library for the Docker Remote API. It does everything the
-``docker`` command does, but from within Python – run containers, manage
-them, pull/push images, etc.
+A Python library for the Docker Engine API. It lets you do anything the
+``docker`` command does, but from within Python apps – run containers,
+manage containers, manage Swarms, etc.
Installation
------------
-The latest stable version is always available on PyPi.
+The latest stable version `is available on
+PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+your ``requirements.txt`` file or install with pip:
::
- pip install docker-py
+ pip install docker
-Documentation
--------------
+Usage
+-----
-|Documentation Status|
+Connect to Docker using the default socket or the configuration in your
+environment:
-`Read the full documentation
-here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
-available in the ``docs/`` directory.
+.. code:: python
-License
--------
+ import docker
+ client = docker.from_env()
-Docker is licensed under the Apache License, Version 2.0. See LICENSE
-for full license text
+You can run containers:
-.. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
+.. code:: python
+
+ >>> client.containers.run("ubuntu", "echo hello world")
+ 'hello world\n'
+
+You can run containers in the background:
+
+.. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+You can manage containers:
+
+.. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+You can stream logs:
+
+.. code:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+You can manage images:
+
+.. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+`Read the full documentation <https://docker-py.readthedocs.io>`__ to
+see everything you can do.
+
+.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
-.. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
- :target: https://readthedocs.org/projects/docker-py/?badge=latest
diff --git a/debian/changelog b/debian/changelog
index af70a25..5144007 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,26 @@
-python-docker (1.10.6-1) UNRELEASED; urgency=medium
+python-docker (2.4.2-2) UNRELEASED; urgency=medium
+ * Add Breaks: python-magnum (<< 5.0.0), python-senlin (<< 2.0.0).
+
+ -- Thomas Goirand <zigo@debian.org> Fri, 03 Nov 2017 17:09:17 +0000
+
+python-docker (2.4.2-1) experimental; urgency=medium
+
+ [ Jason Pleau ]
* New upstream release
* Refresh requirements patch
+ * Update dependencies for new upstream version
+ * Add myself to debian/copyright
- -- Jason Pleau <jason@jpleau.ca> Sun, 25 Dec 2016 20:09:21 -0500
+ [ Felipe Sateler ]
+ * Do not require pip for building.
It is only used to check for docker-py existence,
+ but that doesn't matter in the context of a debian build
+ * Bump standards-version to 4.0.0
+ * Sync dependency versions in python3 and python versions
+ * Bump debhelper compat to 10
+
+ -- Felipe Sateler <fsateler@debian.org> Sun, 16 Jul 2017 11:58:44 -0400
python-docker (1.9.0-1) unstable; urgency=medium
diff --git a/debian/compat b/debian/compat
index ec63514..f599e28 100644
--- a/debian/compat
+++ b/debian/compat
@@ -1 +1 @@
-9
+10
diff --git a/debian/control b/debian/control
index 5fef447..b12f63c 100644
--- a/debian/control
+++ b/debian/control
@@ -3,29 +3,29 @@ Section: python
Priority: optional
Maintainer: Felipe Sateler <fsateler@debian.org>
Uploaders: Jason Pleau <jason@jpleau.ca>
-Build-Depends: debhelper (>= 9),
+Build-Depends: debhelper (>= 10),
dh-python,
- python-all (>= 2.6.6-3~),
- python3-all (>= 3.1.2~),
+ python-all (>= 2.7),
+ python3-all,
python-setuptools,
python3-setuptools,
# requirements.txt
- python-requests (>= 2.5.3~),
+ python-requests (>= 2.11.1~),
python-six (>= 1.4.0~),
python-websocket (>= 0.32.0~),
# requirements3.txt
- python3-requests (>= 2.5.3~),
+ python3-requests (>= 2.11.1~),
python3-six (>= 1.4.0~),
python3-websocket (>= 0.32.0~),
# test-requirements.txt
# python-mock (>= 1.0.1~),
# python-coverage (>= 3.7.1~),
# docker.io,
-Standards-Version: 3.9.8
+Standards-Version: 4.1.1
Homepage: https://github.com/docker/docker-py
Vcs-Git: https://anonscm.debian.org/git/collab-maint/python-docker.git
Vcs-Browser: https://anonscm.debian.org/git/collab-maint/python-docker.git
-X-Python-Version: >= 2.6
+X-Python-Version: >= 2.7
X-Python3-Version: >= 3.5
Package: python-docker
@@ -33,6 +33,9 @@ Architecture: all
Depends: ${misc:Depends}, ${python:Depends},
python-backports.ssl-match-hostname (>= 3.5),
python-ipaddress (>=1.0.16)
+Breaks: python-magnum (<< 5.0.0),
+ python-senlin (<< 2.0.0),
+ docker-compose (<< 1.10)
Description: Python wrapper to access docker.io's control socket
This package contains oodles of routines that aid in controlling
docker.io over its socket control, the same way the docker.io
diff --git a/debian/copyright b/debian/copyright
index 6f5e671..75d9002 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -11,6 +11,7 @@ Files: debian/*
Copyright: 2014 Paul R. Tagliamonte <paultag@debian.org>
2014 Tianon Gravi <tianon@debian.org>
2016 Ondřej Nový <novy@ondrej.org>
+ 2017 Jason Pleau <jason@jpleau.ca>
License: Apache-2.0
License: Apache-2.0
diff --git a/debian/patches/Do-not-require-pip-for-building.patch b/debian/patches/Do-not-require-pip-for-building.patch
new file mode 100644
index 0000000..18d13d8
--- /dev/null
+++ b/debian/patches/Do-not-require-pip-for-building.patch
@@ -0,0 +1,32 @@
+From: Felipe Sateler <fsateler@debian.org>
+Date: Sun, 16 Jul 2017 11:34:24 -0400
+Subject: Do not require pip for building
+
+It is only used to check for docker-py existence,
+but that doesn't matter in the context of a debian build
+---
+ setup.py | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/setup.py b/setup.py
+index 31180d2..0345fab 100644
+--- a/setup.py
++++ b/setup.py
+@@ -5,17 +5,8 @@ import codecs
+ import os
+ import sys
+
+-import pip
+-
+ from setuptools import setup, find_packages
+
+-if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:
+- print(
+- 'ERROR: "docker-py" needs to be uninstalled before installing this'
+- ' package:\npip uninstall docker-py', file=sys.stderr
+- )
+- sys.exit(1)
+-
+ ROOT_DIR = os.path.dirname(__file__)
+ SOURCE_DIR = os.path.join(ROOT_DIR)
+
diff --git a/debian/patches/series b/debian/patches/series
index f499a22..0d3bb78 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1 +1,2 @@
requirements.patch
+Do-not-require-pip-for-building.patch
diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO
new file mode 100644
index 0000000..8dfa851
--- /dev/null
+++ b/docker.egg-info/PKG-INFO
@@ -0,0 +1,110 @@
+Metadata-Version: 1.1
+Name: docker
+Version: 2.4.2
+Summary: A Python library for the Docker Engine API.
+Home-page: https://github.com/docker/docker-py
+Author: Joffrey F
+Author-email: joffrey@docker.com
+License: UNKNOWN
+Description: Docker SDK for Python
+ =====================
+
+ |Build Status|
+
+ A Python library for the Docker Engine API. It lets you do anything the
+ ``docker`` command does, but from within Python apps – run containers,
+ manage containers, manage Swarms, etc.
+
+ Installation
+ ------------
+
+ The latest stable version `is available on
+ PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
+ your ``requirements.txt`` file or install with pip:
+
+ ::
+
+ pip install docker
+
+ Usage
+ -----
+
+ Connect to Docker using the default socket or the configuration in your
+ environment:
+
+ .. code:: python
+
+ import docker
+ client = docker.from_env()
+
+ You can run containers:
+
+ .. code:: python
+
+ >>> client.containers.run("ubuntu", "echo hello world")
+ 'hello world\n'
+
+ You can run containers in the background:
+
+ .. code:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+ You can manage containers:
+
+ .. code:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+ You can stream logs:
+
+ .. code:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print line.strip()
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+ You can manage images:
+
+ .. code:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+ `Read the full documentation <https://docker-py.readthedocs.io>`__ to
+ see everything you can do.
+
+ .. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
+ :target: https://travis-ci.org/docker/docker-py
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Utilities
+Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt
new file mode 100644
index 0000000..ae54eed
--- /dev/null
+++ b/docker.egg-info/SOURCES.txt
@@ -0,0 +1,122 @@
+LICENSE
+MANIFEST.in
+README.md
+README.rst
+requirements.txt
+setup.cfg
+setup.py
+test-requirements.txt
+docker/__init__.py
+docker/auth.py
+docker/client.py
+docker/constants.py
+docker/errors.py
+docker/tls.py
+docker/version.py
+docker.egg-info/PKG-INFO
+docker.egg-info/SOURCES.txt
+docker.egg-info/dependency_links.txt
+docker.egg-info/not-zip-safe
+docker.egg-info/requires.txt
+docker.egg-info/top_level.txt
+docker/api/__init__.py
+docker/api/build.py
+docker/api/client.py
+docker/api/container.py
+docker/api/daemon.py
+docker/api/exec_api.py
+docker/api/image.py
+docker/api/network.py
+docker/api/plugin.py
+docker/api/secret.py
+docker/api/service.py
+docker/api/swarm.py
+docker/api/volume.py
+docker/models/__init__.py
+docker/models/containers.py
+docker/models/images.py
+docker/models/networks.py
+docker/models/nodes.py
+docker/models/plugins.py
+docker/models/resource.py
+docker/models/secrets.py
+docker/models/services.py
+docker/models/swarm.py
+docker/models/volumes.py
+docker/transport/__init__.py
+docker/transport/npipeconn.py
+docker/transport/npipesocket.py
+docker/transport/ssladapter.py
+docker/transport/unixconn.py
+docker/types/__init__.py
+docker/types/base.py
+docker/types/containers.py
+docker/types/healthcheck.py
+docker/types/networks.py
+docker/types/services.py
+docker/types/swarm.py
+docker/utils/__init__.py
+docker/utils/build.py
+docker/utils/decorators.py
+docker/utils/fnmatch.py
+docker/utils/json_stream.py
+docker/utils/ports.py
+docker/utils/socket.py
+docker/utils/utils.py
+tests/__init__.py
+tests/helpers.py
+tests/integration/__init__.py
+tests/integration/api_build_test.py
+tests/integration/api_client_test.py
+tests/integration/api_container_test.py
+tests/integration/api_exec_test.py
+tests/integration/api_healthcheck_test.py
+tests/integration/api_image_test.py
+tests/integration/api_network_test.py
+tests/integration/api_plugin_test.py
+tests/integration/api_secret_test.py
+tests/integration/api_service_test.py
+tests/integration/api_swarm_test.py
+tests/integration/api_volume_test.py
+tests/integration/base.py
+tests/integration/client_test.py
+tests/integration/conftest.py
+tests/integration/errors_test.py
+tests/integration/models_containers_test.py
+tests/integration/models_images_test.py
+tests/integration/models_networks_test.py
+tests/integration/models_nodes_test.py
+tests/integration/models_resources_test.py
+tests/integration/models_services_test.py
+tests/integration/models_swarm_test.py
+tests/integration/models_volumes_test.py
+tests/integration/regression_test.py
+tests/integration/testdata/dummy-plugin/config.json
+tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
+tests/unit/__init__.py
+tests/unit/api_build_test.py
+tests/unit/api_container_test.py
+tests/unit/api_exec_test.py
+tests/unit/api_image_test.py
+tests/unit/api_network_test.py
+tests/unit/api_test.py
+tests/unit/api_volume_test.py
+tests/unit/auth_test.py
+tests/unit/client_test.py
+tests/unit/dockertypes_test.py
+tests/unit/errors_test.py
+tests/unit/fake_api.py
+tests/unit/fake_api_client.py
+tests/unit/fake_stat.py
+tests/unit/models_containers_test.py
+tests/unit/models_images_test.py
+tests/unit/models_networks_test.py
+tests/unit/models_resources_test.py
+tests/unit/models_services_test.py
+tests/unit/ssladapter_test.py
+tests/unit/swarm_test.py
+tests/unit/utils_json_stream_test.py
+tests/unit/utils_test.py
+tests/unit/testdata/certs/ca.pem
+tests/unit/testdata/certs/cert.pem
+tests/unit/testdata/certs/key.pem \ No newline at end of file
diff --git a/docker_py.egg-info/dependency_links.txt b/docker.egg-info/dependency_links.txt
index 8b13789..8b13789 100644
--- a/docker_py.egg-info/dependency_links.txt
+++ b/docker.egg-info/dependency_links.txt
diff --git a/docker_py.egg-info/not-zip-safe b/docker.egg-info/not-zip-safe
index 8b13789..8b13789 100644
--- a/docker_py.egg-info/not-zip-safe
+++ b/docker.egg-info/not-zip-safe
diff --git a/docker_py.egg-info/requires.txt b/docker.egg-info/requires.txt
index a7ddcd4..981d1a9 100644
--- a/docker_py.egg-info/requires.txt
+++ b/docker.egg-info/requires.txt
@@ -1,4 +1,4 @@
-requests >= 2.5.2, != 2.11.0
+requests >= 2.5.2, != 2.11.0, != 2.12.2, != 2.18.0
six >= 1.4.0
websocket-client >= 0.32.0
docker-pycreds >= 0.2.1
diff --git a/docker_py.egg-info/top_level.txt b/docker.egg-info/top_level.txt
index bdb9670..bdb9670 100644
--- a/docker_py.egg-info/top_level.txt
+++ b/docker.egg-info/top_level.txt
diff --git a/docker/__init__.py b/docker/__init__.py
index ad53805..cf732e1 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,6 +1,7 @@
+# flake8: noqa
+from .api import APIClient
+from .client import DockerClient, from_env
from .version import version, version_info
__version__ = version
-__title__ = 'docker-py'
-
-from .client import Client, AutoVersionClient, from_env # flake8: noqa
+__title__ = 'docker'
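
With the rename, the public entry points change as well: the 1.x ``Client`` and ``AutoVersionClient`` are gone, replaced by ``DockerClient`` (usually built via ``from_env()``) and the low-level ``APIClient``. A minimal migration sketch, assuming a daemon on the default local socket:

    import docker

    # High-level client, configured from the environment (DOCKER_HOST etc.)
    client = docker.from_env()

    # Low-level client; version='auto' replaces the old AutoVersionClient
    api = docker.APIClient(base_url='unix://var/run/docker.sock',
                           version='auto')
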
diff --git a/docker/api/__init__.py b/docker/api/__init__.py
index bc7e93c..ff51844 100644
--- a/docker/api/__init__.py
+++ b/docker/api/__init__.py
@@ -1,10 +1,2 @@
# flake8: noqa
-from .build import BuildApiMixin
-from .container import ContainerApiMixin
-from .daemon import DaemonApiMixin
-from .exec_api import ExecApiMixin
-from .image import ImageApiMixin
-from .network import NetworkApiMixin
-from .service import ServiceApiMixin
-from .swarm import SwarmApiMixin
-from .volume import VolumeApiMixin
+from .client import APIClient
diff --git a/docker/api/build.py b/docker/api/build.py
index 7403716..cbef4a8 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -1,11 +1,11 @@
+import json
import logging
import os
import re
-import json
+from .. import auth
from .. import constants
from .. import errors
-from .. import auth
from .. import utils
@@ -17,7 +17,97 @@ class BuildApiMixin(object):
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
- decode=False, buildargs=None, gzip=False):
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None, cache_from=None, target=None, network_mode=None):
+ """
+ Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
+ needs to be set. ``path`` can be a local path (to a directory
+ containing a Dockerfile) or a remote URL. ``fileobj`` must be a
+ readable file-like object to a Dockerfile.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+        and also pass ``custom_context=True``. If the stream is also
+        compressed, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ Example:
+ >>> from io import BytesIO
+ >>> from docker import APIClient
+ >>> dockerfile = '''
+ ... # Shared Volume
+ ... FROM busybox:buildroot-2014.02
+ ... VOLUME /data
+ ... CMD ["/bin/sh"]
+ ... '''
+ >>> f = BytesIO(dockerfile.encode('utf-8'))
+ >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
+ >>> response = [line for line in cli.build(
+ ... fileobj=f, rm=True, tag='yourname/volume'
+ ... )]
+ >>> response
+ ['{"stream":" ---\\u003e a9eb17255234\\n"}',
+ '{"stream":"Step 1 : VOLUME /data\\n"}',
+ '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
+ '{"stream":" ---\\u003e 713bca62012e\\n"}',
+ '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
+ '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
+ '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
+ '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
+ '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
+ '{"stream":"Successfully built 032b8b2855fc\\n"}']
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ stream (bool): *Deprecated for API version > 1.8 (always True)*.
+ Return a blocking generator you can iterate over to retrieve
+ build output as it happens
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (list): A list of images used for build cache
+ resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+
+ Returns:
+ A generator for the build output.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
+ """
remote = context = None
headers = {}
container_limits = container_limits or {}
@@ -88,6 +178,46 @@ class BuildApiMixin(object):
'buildargs was only introduced in API version 1.21'
)
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
+ if cache_from:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'cachefrom': json.dumps(cache_from)})
+ else:
+ raise errors.InvalidVersion(
+ 'cache_from was only introduced in API version 1.25'
+ )
+
+ if target:
+ if utils.version_gte(self._version, '1.29'):
+ params.update({'target': target})
+ else:
+ raise errors.InvalidVersion(
+ 'target was only introduced in API version 1.29'
+ )
+
+ if network_mode:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'networkmode': network_mode})
+ else:
+ raise errors.InvalidVersion(
+ 'network_mode was only introduced in API version 1.25'
+ )
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
@@ -130,19 +260,35 @@ class BuildApiMixin(object):
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
+ auth_data = {}
+ if self._auth_configs.get('credsStore'):
+ # Using a credentials store, we need to retrieve the
+ # credentials for each registry listed in the config.json file
+ # Matches CLI behavior: https://github.com/docker/docker/blob/
+ # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
+ # credentials/native_store.go#L68-L83
+ for registry in self._auth_configs.keys():
+ if registry == 'credsStore' or registry == 'HttpHeaders':
+ continue
+ auth_data[registry] = auth.resolve_authconfig(
+ self._auth_configs, registry
+ )
+ else:
+ auth_data = self._auth_configs
+
log.debug(
'Sending auth config ({0})'.format(
- ', '.join(repr(k) for k in self._auth_configs.keys())
+ ', '.join(repr(k) for k in auth_data.keys())
)
)
if utils.compare_version('1.19', self._version) >= 0:
headers['X-Registry-Config'] = auth.encode_header(
- self._auth_configs
+ auth_data
)
else:
headers['X-Registry-Config'] = auth.encode_header({
- 'configs': self._auth_configs
+ 'configs': auth_data
})
else:
log.debug('No auth config found')
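
All of the new build keywords above are version-gated, as the hunk shows; a hedged usage sketch combining a few of them (the tag and label values are made up):

    import sys

    import docker

    cli = docker.APIClient(version='auto')  # assumes a reachable daemon
    output = cli.build(
        path='.',                           # directory with a Dockerfile
        tag='example/app:latest',           # hypothetical tag
        labels={'maintainer': 'example'},   # needs API >= 1.23
        cache_from=['example/app:latest'],  # needs API >= 1.25
        network_mode='host',                # needs API >= 1.25
        decode=True,                        # yield dicts, not raw JSON lines
    )
    for chunk in output:
        sys.stdout.write(chunk.get('stream', ''))
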
diff --git a/docker/api/client.py b/docker/api/client.py
new file mode 100644
index 0000000..65b5baa
--- /dev/null
+++ b/docker/api/client.py
@@ -0,0 +1,455 @@
+import json
+import struct
+import warnings
+from functools import partial
+
+import requests
+import requests.exceptions
+import six
+import websocket
+
+from .build import BuildApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .network import NetworkApiMixin
+from .plugin import PluginApiMixin
+from .secret import SecretApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
+from .. import auth
+from ..constants import (
+ DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
+ MINIMUM_DOCKER_API_VERSION
+)
+from ..errors import (
+ DockerException, TLSParameterError,
+ create_api_error_from_http_exception
+)
+from ..tls import TLSConfig
+from ..transport import SSLAdapter, UnixAdapter
+from ..utils import utils, check_resource, update_headers
+from ..utils.socket import frames_iter
+from ..utils.json_stream import json_stream
+try:
+ from ..transport import NpipeAdapter
+except ImportError:
+ pass
+
+
+class APIClient(
+ requests.Session,
+ BuildApiMixin,
+ ContainerApiMixin,
+ DaemonApiMixin,
+ ExecApiMixin,
+ ImageApiMixin,
+ NetworkApiMixin,
+ PluginApiMixin,
+ SecretApiMixin,
+ ServiceApiMixin,
+ SwarmApiMixin,
+ VolumeApiMixin):
+ """
+ A low-level client for the Docker Engine API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.24',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2016-09-27T23:38:15.810178467+00:00',
+ u'Experimental': True,
+ u'GitCommit': u'45bed2c',
+ u'GoVersion': u'go1.6.3',
+ u'KernelVersion': u'4.4.22-moby',
+ u'Os': u'linux',
+ u'Version': u'1.12.2-rc1'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.26``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ """
+
+ __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
+ '_version',
+ 'base_url',
+ 'timeout']
+
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
+ super(APIClient, self).__init__()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._auth_configs = auth.load_config()
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://localunixsocket'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ try:
+ self._custom_adapter = NpipeAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ except NameError:
+ raise DockerException(
+ 'Install pypiwin32 package to enable npipe:// support'
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None:
+ self._version = DEFAULT_DOCKER_API_VERSION
+ elif isinstance(version, six.string_types):
+ if version.lower() == 'auto':
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
+ else:
+ raise DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ warnings.warn(
+ 'The minimum API version supported is {}, but you are using '
+ 'version {}. It is recommended you either upgrade Docker '
+ 'Engine or use an older version of Docker SDK for '
+ 'Python.'.format(MINIMUM_DOCKER_API_VERSION, self._version)
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError:
+ raise DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ )
+ except Exception as e:
+ raise DockerException(
+ 'Error while fetching server API version: {0}'.format(e)
+ )
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, six.string_types):
+ raise ValueError(
+ 'Expected a string but found {0} ({1}) '
+ 'instead'.format(arg, type(arg))
+ )
+
+ quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
+ args = map(quote_f, args)
+
+ if kwargs.get('versioned_api', True):
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(*args)
+ )
+ else:
+ return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise create_api_error_from_http_exception(e)
+
+ def _result(self, response, json=False, binary=False):
+ assert not (json and binary)
+ self._raise_for_status(response)
+
+ if json:
+ return response.json()
+ if binary:
+ return response.content
+ return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None and isinstance(data, dict):
+ for k, v in six.iteritems(data):
+ if v is not None:
+ data2[k] = v
+ elif data is not None:
+ data2 = data
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ @check_resource('container')
+ def _attach_websocket(self, container, params=None):
+ url = self._url("/containers/{0}/attach/ws", container)
+ req = requests.Request("POST", url, params=self._attach_params(params))
+ full_url = req.prepare().url
+ full_url = full_url.replace("http://", "ws://", 1)
+ full_url = full_url.replace("https://", "wss://", 1)
+ return self._create_websocket_connection(full_url)
+
+ def _create_websocket_connection(self, url):
+ return websocket.create_connection(url)
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif six.PY3:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ else:
+ sock = response.raw._fp.fp._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+
+ if response.raw._fp.chunked:
+ if decode:
+ for chunk in json_stream(self._stream_helper(response, False)):
+ yield chunk
+ else:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ buf_length = len(buf)
+ walker = 0
+ while True:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
+ break
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ _, length = struct.unpack_from('>BxxxL', header)
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ _, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result_old(self, response):
+ ''' Stream raw output for API versions below 1.6 '''
+ self._raise_for_status(response)
+ for line in response.iter_lines(chunk_size=1,
+ decode_unicode=True):
+ # filter out keep-alive new lines
+ if line:
+ yield line
+
+ def _stream_raw_result(self, response):
+ ''' Stream result for TTY-enabled container above API 1.6 '''
+ self._raise_for_status(response)
+ for out in response.iter_content(chunk_size=1, decode_unicode=True):
+ yield out
+
+ def _read_from_socket(self, response, stream):
+ socket = self._get_raw_response_socket(response)
+
+ if stream:
+ return frames_iter(socket)
+ else:
+ return six.binary_type().join(frames_iter(socket))
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ def _get_result(self, container, stream, res):
+ cont = self.inspect_container(container)
+ return self._get_result_tty(stream, res, cont['Config']['Tty'])
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # Stream multi-plexing was only introduced in API v1.6. Anything
+ # before that needs old-style streaming.
+ if utils.compare_version('1.6', self._version) < 0:
+ return self._stream_raw_result_old(res)
+
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = six.binary_type()
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super(APIClient, self).get_adapter(url)
+ except requests.exceptions.InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
+
+ def reload_config(self, dockercfg_path=None):
+ """
+ Force a reload of the auth configuration
+
+ Args:
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+            otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ None
+ """
+ self._auth_configs = auth.load_config(dockercfg_path)
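
A short sketch of the version handling documented above (pinned versions are plain strings; ``'auto'`` asks the daemon):

    import docker

    # Pin a specific API version...
    pinned = docker.APIClient(version='1.24')
    print(pinned.api_version)   # '1.24'

    # ...or negotiate with the daemon (assumes one is running)
    auto = docker.APIClient(version='auto')
    print(auto.api_version)     # whatever the daemon reports

    # Re-read credentials after editing ~/.docker/config.json
    auto.reload_config()
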
diff --git a/docker/api/container.py b/docker/api/container.py
index b8507d8..532a9c6 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -4,13 +4,39 @@ from datetime import datetime
from .. import errors
from .. import utils
-from ..utils.utils import create_networking_config, create_endpoint_config
+from ..types import (
+ ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig
+)
class ContainerApiMixin(object):
- @utils.check_resource
+ @utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
+ """
+ Attach to a container.
+
+ The ``.logs()`` function is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ container (str): The container to attach to.
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
@@ -28,8 +54,22 @@ class ContainerApiMixin(object):
return self._read_from_socket(response, stream)
- @utils.check_resource
+ @utils.check_resource('container')
def attach_socket(self, container, params=None, ws=False):
+ """
+ Like ``attach``, but returns the underlying socket-like object for the
+ HTTP request.
+
+ Args:
+ container (str): The container to attach to.
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if params is None:
params = {
'stdout': 1,
@@ -53,9 +93,29 @@ class ContainerApiMixin(object):
)
)
- @utils.check_resource
+ @utils.check_resource('container')
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ container (str): The image hash of the container
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'container': container,
'repo': repository,
@@ -71,6 +131,50 @@ class ContainerApiMixin(object):
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ quiet (bool): Only display numeric Ids
+ all (bool): Show all containers. Only running containers are shown
+                by default
+            trunc (bool): Truncate output
+ latest (bool): Show only the latest created container, include
+ non-running ones.
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+            before (str): Show only containers created before Id or Name,
+ include non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ size (bool): Display sizes
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ A list of dicts, one per container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
@@ -91,16 +195,34 @@ class ContainerApiMixin(object):
x['Id'] = x['Id'][:12]
return res
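
A quick sketch of the filter syntax described in the docstring above (assumes a running daemon; the label value is hypothetical):

    import docker

    cli = docker.APIClient(version='auto')

    # All containers, running or not, that exited with status 0
    exited_ok = cli.containers(all=True, filters={'exited': 0})

    # Running containers carrying a given label
    labelled = cli.containers(
        filters={'status': 'running', 'label': 'com.example.tier=web'}
    )
    for ctnr in exited_ok + labelled:
        print(ctnr['Id'])
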
- @utils.check_resource
+ @utils.check_resource('container')
def copy(self, container, resource):
+ """
+ Identical to the ``docker cp`` command. Get files/folders from the
+ container.
+
+ **Deprecated for API version >= 1.20.** Use
+ :py:meth:`~ContainerApiMixin.get_archive` instead.
+
+ Args:
+ container (str): The container to copy from
+ resource (str): The path within the container
+
+ Returns:
+ The contents of the file as a string
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if utils.version_gte(self._version, '1.20'):
warnings.warn(
- 'Client.copy() is deprecated for API version >= 1.20, '
+ 'APIClient.copy() is deprecated for API version >= 1.20, '
'please use get_archive() instead',
DeprecationWarning
)
res = self._post_json(
- self._url("/containers/{0}/copy".format(container)),
+ self._url("/containers/{0}/copy", container),
data={"Resource": resource},
stream=True
)
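
Since ``copy`` is deprecated from API 1.20 onward, a sketch of the suggested ``get_archive`` replacement (container id and path are hypothetical):

    import docker

    cli = docker.APIClient(version='auto')
    # get_archive returns (stream, stat): a tar stream plus path metadata
    stream, stat = cli.get_archive('45e6d2de7c54', '/etc/hostname')
    with open('hostname.tar', 'wb') as f:
        f.write(stream.read())
    print(stat)   # e.g. {'name': 'hostname', 'size': ...}
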
@@ -115,8 +237,197 @@ class ContainerApiMixin(object):
cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None,
mac_address=None, labels=None, volume_driver=None,
- stop_signal=None, networking_config=None):
+ stop_signal=None, networking_config=None,
+ healthcheck=None, stop_timeout=None, runtime=None):
+ """
+ Creates a container. Parameters are similar to those for the ``docker
+ run`` command except it doesn't support the attach options (``-a``).
+
+ The arguments that are passed directly to this function are
+ host-independent configuration options. Host-specific configuration
+ is passed with the `host_config` argument. You'll normally want to
+ use this method in combination with the :py:meth:`create_host_config`
+ method to generate ``host_config``.
+
+ **Port bindings**
+
+ Port binding is done in two parts: first, provide a list of ports to
+ open inside the container with the ``ports`` parameter, then declare
+ bindings with the ``host_config`` parameter. For example:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[1111, 2222],
+ host_config=cli.create_host_config(port_bindings={
+ 1111: 4567,
+ 2222: None
+ })
+ )
+
+
+        You can limit the host address on which the port will be exposed
+        like so:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+
+ Or without host port assignment:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+
+ If you wish to use UDP instead of TCP (default), you need to declare
+ ports as such in both the config and host config:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
+ host_config=cli.create_host_config(port_bindings={
+ '1111/udp': 4567, 2222: None
+ })
+ )
+
+ To bind multiple host ports to a single container port, use the
+ following syntax:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [1234, 4567]
+ })
+
+ You can also bind multiple IPs to a single container port:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [
+ ('192.168.0.100', 1234),
+ ('192.168.0.101', 1234)
+ ]
+ })
+
+         **Using volumes**
+
+ Volume declaration is done in two parts. Provide a list of
+ paths to use as mountpoints inside the container with the
+ ``volumes`` parameter, and declare mappings from paths on the host
+ in the ``host_config`` section.
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds={
+ '/home/user1/': {
+ 'bind': '/mnt/vol2',
+ 'mode': 'rw',
+ },
+ '/var/www': {
+ 'bind': '/mnt/vol1',
+ 'mode': 'ro',
+ }
+ })
+ )
+
+ You can alternatively specify binds as a list. This code is equivalent
+ to the example above:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ ])
+ )
+
+ **Networking**
+
+ You can specify networks to connect the container to by using the
+ ``networking_config`` parameter. At the time of creation, you can
+         only connect a container to a single network, but you
+ can create more connections by using
+ :py:meth:`~connect_container_to_network`.
+
+ For example:
+
+ .. code-block:: python
+
+ networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config(
+ ipv4_address='172.28.0.124',
+ aliases=['foo', 'bar'],
+ links=['container2']
+ )
+ })
+
+ ctnr = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ Args:
+ image (str): The image to run
+ command (str or list): The command to be run in the container
+ hostname (str): Optional hostname for the container
+ user (str or int): Username or UID
+ detach (bool): Detached mode: run container in the background and
+ return container ID
+ stdin_open (bool): Keep STDIN open even if not attached
+ tty (bool): Allocate a pseudo-TTY
+ mem_limit (float or str): Memory limit. Accepts float values (which
+ represent the memory limit of the created container in bytes)
+ or a string with a units identification char (``100000b``,
+ ``1000k``, ``128m``, ``1g``). If a string is specified without
+ a units character, bytes are assumed as an intended unit.
+ ports (list of ints): A list of port numbers
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ dns (:py:class:`list`): DNS name servers. Deprecated since API
+ version 1.10. Use ``host_config`` instead.
+ volumes (str or list): List of paths inside the container to use
+ as volumes.
+ volumes_from (:py:class:`list`): List of container names or Ids to
+ get volumes from.
+ network_disabled (bool): Disable networking
+ name (str): A name for the container
+ entrypoint (str or list): An entrypoint
+ working_dir (str): Path to the working directory
+ domainname (str or list): Set custom DNS search domains
+             memswap_limit (int): Maximum amount of memory + swap a
+                 container is allowed to consume.
+ host_config (dict): A dictionary created with
+ :py:meth:`create_host_config`.
+             mac_address (str): The MAC address to assign to the container
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ volume_driver (str): The name of a volume driver/plugin.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ stop_timeout (int): Timeout to stop the container, in seconds.
+ Default: 10
+ networking_config (dict): A networking configuration generated
+ by :py:meth:`create_networking_config`.
+ runtime (str): Runtime to use with this container.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+
+ Returns:
+             A dictionary with the created container's ``Id`` and a
+             ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
@@ -126,16 +437,17 @@ class ContainerApiMixin(object):
)
config = self.create_container_config(
- image, command, hostname, user, detach, stdin_open,
- tty, mem_limit, ports, environment, dns, volumes, volumes_from,
+ image, command, hostname, user, detach, stdin_open, tty, mem_limit,
+ ports, dns, environment, volumes, volumes_from,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
- volume_driver, stop_signal, networking_config,
+ volume_driver, stop_signal, networking_config, healthcheck,
+ stop_timeout, runtime
)
return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs):
- return utils.create_container_config(self._version, *args, **kwargs)
+ return ContainerConfig(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
u = self._url("/containers/create")
@@ -146,6 +458,142 @@ class ContainerApiMixin(object):
return self._result(res, True)
def create_host_config(self, *args, **kwargs):
+ """
+ Create a dictionary for the ``host_config`` argument to
+ :py:meth:`create_container`.
+
+ Args:
+ auto_remove (bool): enable auto-removal of the container on daemon
+ side when the container's process exits.
+ binds (dict): Volumes to bind. See :py:meth:`create_container`
+ for more information.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file
+ dns_search (:py:class:`list`): DNS search domains.
+             extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ lxc_conf (dict): LXC config.
+ mem_limit (float or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+                 specified without a units character, bytes are assumed as an
+                 intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ network_mode (str): One of:
+
+                 - ``bridge`` Create a new network stack for the container
+                   on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ port_bindings (dict): See :py:meth:`create_container`
+ for more information.
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+                 - ``Name`` One of ``on-failure`` or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of dicts.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ runtime (str): Runtime to use with this container.
+
+
+ Returns:
+ (dict) A dictionary which can be passed to the ``host_config``
+ argument to :py:meth:`create_container`.
+
+ Example:
+
+ >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ volumes_from=['nostalgic_newton'])
+ {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
+ 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
+
+"""
if not kwargs:
kwargs = {}
if 'version' in kwargs:
@@ -154,31 +602,126 @@ class ContainerApiMixin(object):
"keyword argument 'version'"
)
kwargs['version'] = self._version
- return utils.create_host_config(*args, **kwargs)
+ return HostConfig(*args, **kwargs)
def create_networking_config(self, *args, **kwargs):
- return create_networking_config(*args, **kwargs)
+ """
+ Create a networking config dictionary to be used as the
+ ``networking_config`` parameter in :py:meth:`create_container`.
+
+ Args:
+ endpoints_config (dict): A dictionary mapping network names to
+ endpoint configurations generated by
+ :py:meth:`create_endpoint_config`.
+
+ Returns:
+ (dict) A networking config.
+
+ Example:
+
+ >>> docker_client.create_network('network1')
+ >>> networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config()
+ })
+ >>> container = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ """
+ return NetworkingConfig(*args, **kwargs)
def create_endpoint_config(self, *args, **kwargs):
- return create_endpoint_config(self._version, *args, **kwargs)
+ """
+ Create an endpoint config dictionary to be used with
+ :py:meth:`create_networking_config`.
+
+ Args:
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Returns:
+ (dict) An endpoint config.
+
+ Example:
- @utils.check_resource
+ >>> endpoint_config = client.create_endpoint_config(
+ aliases=['web', 'app'],
+ links=['app_db'],
+ ipv4_address='132.65.0.123'
+ )
+
+ """
+ return EndpointConfig(self._version, *args, **kwargs)
+
+ @utils.check_resource('container')
def diff(self, container):
+ """
+ Inspect changes on a container's filesystem.
+
+ Args:
+ container (str): The container to diff
+
+ Returns:
+             (list): A list of dicts describing each changed path, with
+             ``Path`` and ``Kind`` keys.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
- @utils.check_resource
+ @utils.check_resource('container')
def export(self, container):
+ """
+ Export the contents of a filesystem as a tar archive.
+
+ Args:
+ container (str): The container to export
+
+ Returns:
+ (str): The filesystem tar archive
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
self._raise_for_status(res)
return res.raw
- @utils.check_resource
+ @utils.check_resource('container')
@utils.minimum_version('1.20')
def get_archive(self, container, path):
+ """
+ Retrieve a file or folder from a container in the form of a tar
+ archive.
+
+ Args:
+ container (str): The container where the file is located
+ path (str): Path to the file or folder to retrieve
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
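+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``
+             and ``mycontainer`` is an existing container; the first element
+             of the returned tuple is a raw stream exposing ``read()``:
+
+             >>> strm, stat = cli.get_archive('mycontainer', '/etc/hosts')
+             >>> with open('./hosts.tar', 'wb') as f:
+             ...     f.write(strm.read())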
+ """
params = {
'path': path
}
@@ -191,14 +734,39 @@ class ContainerApiMixin(object):
utils.decode_json_header(encoded_stat) if encoded_stat else None
)
- @utils.check_resource
+ @utils.check_resource('container')
def inspect_container(self, container):
+ """
+ Identical to the `docker inspect` command, but only for containers.
+
+ Args:
+ container (str): The container to inspect
+
+ Returns:
+ (dict): Similar to the output of `docker inspect`, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
- @utils.check_resource
+ @utils.check_resource('container')
def kill(self, container, signal=None):
+ """
+ Kill a container or send a signal to a container.
+
+ Args:
+ container (str): The container to kill
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
@@ -209,9 +777,35 @@ class ContainerApiMixin(object):
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def logs(self, container, stdout=True, stderr=True, stream=False,
timestamps=False, tail='all', since=None, follow=None):
+ """
+ Get logs from a container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ container (str): The container to get logs from
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+             tail (str or int): Output specified number of lines at the end of
+                 logs. Either an integer representing the number of lines, or
+                 the string ``all``. Default: ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+
+ Returns:
+ (generator or str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
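+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``
+             and ``mycontainer`` is a running container:
+
+             >>> for line in cli.logs('mycontainer', stream=True):
+             ...     print(line)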
+ """
if utils.compare_version('1.11', self._version) >= 0:
if follow is None:
follow = stream
@@ -235,6 +829,11 @@ class ContainerApiMixin(object):
params['since'] = utils.datetime_to_timestamp(since)
elif (isinstance(since, int) and since > 0):
params['since'] = since
+ else:
+ raise errors.InvalidArgument(
+ 'since value should be datetime or int, not {}'.
+ format(type(since))
+ )
url = self._url("/containers/{0}/logs", container)
res = self._get(url, params=params, stream=stream)
return self._get_result(container, stream, res)
@@ -246,14 +845,50 @@ class ContainerApiMixin(object):
logs=True
)
- @utils.check_resource
+ @utils.check_resource('container')
def pause(self, container):
+ """
+ Pauses all processes within a container.
+
+ Args:
+ container (str): The container to pause
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def port(self, container, private_port):
+ """
+         Look up the public-facing port that is NAT-ed to ``private_port``.
+ Identical to the ``docker port`` command.
+
+ Args:
+ container (str): The container to look up
+ private_port (int): The private port to inspect
+
+ Returns:
+ (list of dict): The mapping for the host ports
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ .. code-block:: bash
+
+ $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
+ 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
+
+ .. code-block:: python
+
+ >>> cli.port('7174d6347063', 80)
+ [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
+ """
res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
@@ -275,17 +910,71 @@ class ContainerApiMixin(object):
return h_ports
- @utils.check_resource
+ @utils.check_resource('container')
@utils.minimum_version('1.20')
def put_archive(self, container, path, data):
+ """
+ Insert a file or folder in an existing container using a tar archive as
+ source.
+
+ Args:
+ container (str): The container where the file(s) will be extracted
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
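+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``
+             and ``./sh_bin.tar`` is a tar archive on the local filesystem:
+
+             >>> with open('./sh_bin.tar', 'rb') as f:
+             ...     cli.put_archive('mycontainer', '/usr/bin', f.read())
+             True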
+ """
params = {'path': path}
url = self._url('/containers/{0}/archive', container)
res = self._put(url, params=params, data=data)
self._raise_for_status(res)
return res.status_code == 200
- @utils.check_resource
+ @utils.minimum_version('1.25')
+ def prune_containers(self, filters=None):
+ """
+ Delete stopped containers
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted container IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
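+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``;
+             the key names follow the Engine API response:
+
+             >>> cli.prune_containers()
+             {'ContainersDeleted': None, 'SpaceReclaimed': 0}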
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/containers/prune')
+ return self._result(self._post(url, params=params), True)
+
+ @utils.check_resource('container')
def remove_container(self, container, v=False, link=False, force=False):
+ """
+ Remove a container. Similar to the ``docker rm`` command.
+
+ Args:
+ container (str): The container to remove
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
@@ -293,98 +982,121 @@ class ContainerApiMixin(object):
self._raise_for_status(res)
@utils.minimum_version('1.17')
- @utils.check_resource
+ @utils.check_resource('container')
def rename(self, container, name):
+ """
+ Rename a container. Similar to the ``docker rename`` command.
+
+ Args:
+ container (str): ID of the container to rename
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def resize(self, container, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ container (str or dict): The container to resize
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def restart(self, container, timeout=10):
+ """
+ Restart a container. Similar to the ``docker restart`` command.
+
+ Args:
+ container (str or dict): The container to restart. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
- @utils.check_resource
- def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=None, links=None, privileged=None,
- dns=None, dns_search=None, volumes_from=None, network_mode=None,
- restart_policy=None, cap_add=None, cap_drop=None, devices=None,
- extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None):
+ @utils.check_resource('container')
+ def start(self, container, *args, **kwargs):
+ """
+ Start a container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
- if utils.compare_version('1.10', self._version) < 0:
- if dns is not None:
- raise errors.InvalidVersion(
- 'dns is only supported for API version >= 1.10'
- )
- if volumes_from is not None:
- raise errors.InvalidVersion(
- 'volumes_from is only supported for API version >= 1.10'
- )
+ **Deprecation warning:** Passing configuration options in ``start`` is
+ no longer supported. Users are expected to provide host config options
+ in the ``host_config`` parameter of
+ :py:meth:`~ContainerApiMixin.create_container`.
- if utils.compare_version('1.15', self._version) < 0:
- if security_opt is not None:
- raise errors.InvalidVersion(
- 'security_opt is only supported for API version >= 1.15'
- )
- if ipc_mode:
- raise errors.InvalidVersion(
- 'ipc_mode is only supported for API version >= 1.15'
- )
- if utils.compare_version('1.17', self._version) < 0:
- if read_only is not None:
- raise errors.InvalidVersion(
- 'read_only is only supported for API version >= 1.17'
- )
- if pid_mode is not None:
- raise errors.InvalidVersion(
- 'pid_mode is only supported for API version >= 1.17'
- )
+ Args:
+ container (str): The container to start
- if utils.compare_version('1.18', self._version) < 0:
- if ulimits is not None:
- raise errors.InvalidVersion(
- 'ulimits is only supported for API version >= 1.18'
- )
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.DeprecatedMethod`
+                 If any argument besides ``container`` is provided.
- start_config_kwargs = dict(
- binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
- publish_all_ports=publish_all_ports, links=links, dns=dns,
- privileged=privileged, dns_search=dns_search, cap_add=cap_add,
- cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
- network_mode=network_mode, restart_policy=restart_policy,
- extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
- ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
- )
- start_config = None
-
- if any(v is not None for v in start_config_kwargs.values()):
- if utils.compare_version('1.15', self._version) > 0:
- warnings.warn(
- 'Passing host config parameters in start() is deprecated. '
- 'Please use host_config in create_container instead!',
- DeprecationWarning
- )
- start_config = self.create_host_config(**start_config_kwargs)
+ Example:
+ >>> container = cli.create_container(
+ ... image='busybox:latest',
+ ... command='/bin/sleep 30')
+ >>> cli.start(container=container.get('Id'))
+ """
+ if args or kwargs:
+ raise errors.DeprecatedMethod(
+ 'Providing configuration in the start() method is no longer '
+ 'supported. Use the host_config param in create_container '
+ 'instead.'
+ )
url = self._url("/containers/{0}/start", container)
- res = self._post_json(url, data=start_config)
+ res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.17')
- @utils.check_resource
+ @utils.check_resource('container')
def stats(self, container, decode=None, stream=True):
+ """
+ Stream statistics for a specific container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ container (str): The container to stream statistics from
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
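+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``
+             and ``mycontainer`` is a running container:
+
+             >>> stats = cli.stats('mycontainer', stream=False)  # one snapshot
+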
+ """
url = self._url("/containers/{0}/stats", container)
if stream:
return self._stream_helper(self._get(url, stream=True),
@@ -393,8 +1105,20 @@ class ContainerApiMixin(object):
return self._result(self._get(url, params={'stream': False}),
json=True)
- @utils.check_resource
+ @utils.check_resource('container')
def stop(self, container, timeout=10):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ container (str): The container to stop
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
@@ -402,27 +1126,73 @@ class ContainerApiMixin(object):
timeout=(timeout + (self.timeout or 0)))
self._raise_for_status(res)
- @utils.check_resource
+ @utils.check_resource('container')
def top(self, container, ps_args=None):
+ """
+ Display the running processes of a container.
+
+ Args:
+ container (str): The container to inspect
+             ps_args (str): Optional arguments passed to ``ps`` (e.g. ``aux``)
+
+         Returns:
+             (dict): The output of ``docker top``, with ``Titles`` and
+             ``Processes`` keys
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
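+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``;
+             the ``Titles``/``Processes`` keys follow the Engine API:
+
+             >>> cli.top('mycontainer')
+             {'Titles': ['PID', 'USER', 'TIME', 'COMMAND'],
+             'Processes': [['...', 'root', '0:00', 'sleep 30']]}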
+ """
u = self._url("/containers/{0}/top", container)
params = {}
if ps_args is not None:
params['ps_args'] = ps_args
return self._result(self._get(u, params=params), True)
- @utils.check_resource
+ @utils.check_resource('container')
def unpause(self, container):
+ """
+ Unpause all processes within a container.
+
+ Args:
+ container (str): The container to unpause
+ """
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.22')
- @utils.check_resource
+ @utils.check_resource('container')
def update_container(
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
- mem_reservation=None, memswap_limit=None, kernel_memory=None
+ mem_reservation=None, memswap_limit=None, kernel_memory=None,
+ restart_policy=None
):
+ """
+         Update resource configuration of a container.
+
+         Args:
+             container (str): The container to update
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
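+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``:
+
+             >>> cli.update_container('mycontainer', mem_limit='1g',
+             ...                      memswap_limit='2g')
+             {'Warnings': None}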
+ """
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
@@ -445,12 +1215,38 @@ class ContainerApiMixin(object):
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
+ if restart_policy:
+ if utils.version_lt(self._version, '1.23'):
+ raise errors.InvalidVersion(
+ 'restart policy update is not supported '
+ 'for API version < 1.23'
+ )
+ data['RestartPolicy'] = restart_policy
res = self._post_json(url, data=data)
return self._result(res, True)
- @utils.check_resource
+ @utils.check_resource('container')
def wait(self, container, timeout=None):
+ """
+ Block until a container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ container (str or dict): The container to wait on. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Request timeout
+
+ Returns:
+ (int): The exit code of the container. Returns ``-1`` if the API
+ responds without a ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
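+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``;
+             ``busybox``'s ``true`` command exits with code ``0``:
+
+             >>> ctnr = cli.create_container('busybox', 'true')
+             >>> cli.start(ctnr)
+             >>> cli.wait(ctnr)
+             0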
+ """
url = self._url("/containers/{0}/wait", container)
res = self._post(url, timeout=timeout)
self._raise_for_status(res)
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 9ebe73c..285b742 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -2,13 +2,58 @@ import os
import warnings
from datetime import datetime
-from ..auth import auth
+from .. import auth, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from ..utils import utils
class DaemonApiMixin(object):
+ @utils.minimum_version('1.25')
+ def df(self):
+ """
+ Get data usage information.
+
+ Returns:
+ (dict): A dictionary representing different resource categories
+ and their respective data usage.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/system/df')
+ return self._result(self._get(url), True)
+
def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ (generator): A blocking generator you can iterate over to retrieve
+ events as they happen.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+             >>> for event in client.events():
+             ...     print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+ """
+
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
@@ -23,17 +68,51 @@ class DaemonApiMixin(object):
'until': until,
'filters': filters
}
+ url = self._url('/events')
return self._stream_helper(
- self.get(self._url('/events'), params=params, stream=True),
+ self._get(url, params=params, stream=True, timeout=None),
decode=decode
)
def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, insecure_registry=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+ reauth (bool): Whether or not to refresh existing authentication on
+ the Docker server.
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+                 otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
@@ -69,8 +148,30 @@ class DaemonApiMixin(object):
return self._result(response, json=True)
def ping(self):
- return self._result(self._get(self._url('/_ping')))
+ """
+         Checks whether the server is responsive. An exception will be raised
+         if it isn't responding.
+
+ Returns:
+             (bool): ``True`` if the server responded with ``OK``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
+ """
+ Returns version information from the server. Similar to the ``docker
+ version`` command.
+
+ Returns:
+ (dict): The server version information
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 6e49996..2b407ce 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -6,20 +6,54 @@ from .. import utils
class ExecApiMixin(object):
@utils.minimum_version('1.15')
- @utils.check_resource
+ @utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
- stdin=False, tty=False, privileged=False, user=''):
- if privileged and utils.compare_version('1.19', self._version) < 0:
+ stdin=False, tty=False, privileged=False, user='',
+ environment=None):
+ """
+ Sets up an exec instance in a running container.
+
+ Args:
+ container (str): Target container where exec instance will be
+ created
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+
+ Returns:
+ (dict): A dictionary with an exec ``Id`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ if privileged and utils.version_lt(self._version, '1.19'):
raise errors.InvalidVersion(
'Privileged exec is not supported in API < 1.19'
)
- if user and utils.compare_version('1.19', self._version) < 0:
+ if user and utils.version_lt(self._version, '1.19'):
raise errors.InvalidVersion(
'User-specific exec is not supported in API < 1.19'
)
+ if environment is not None and utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Setting environment for exec is not supported in API < 1.25'
+ )
+
if isinstance(cmd, six.string_types):
cmd = utils.split_command(cmd)
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
data = {
'Container': container,
'User': user,
@@ -28,7 +62,8 @@ class ExecApiMixin(object):
'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
- 'Cmd': cmd
+ 'Cmd': cmd,
+ 'Env': environment,
}
url = self._url('/containers/{0}/exec', container)
@@ -37,6 +72,19 @@ class ExecApiMixin(object):
@utils.minimum_version('1.16')
def exec_inspect(self, exec_id):
+ """
+ Return low-level information about an exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+
+ Returns:
+ (dict): Dictionary of values returned by the endpoint.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
@@ -44,6 +92,15 @@ class ExecApiMixin(object):
@utils.minimum_version('1.15')
def exec_resize(self, exec_id, height=None, width=None):
+ """
+ Resize the tty session used by the specified exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ height (int): Height of tty session
+ width (int): Width of tty session
+ """
+
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@@ -53,11 +110,28 @@ class ExecApiMixin(object):
self._raise_for_status(res)
@utils.minimum_version('1.15')
+ @utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
+ """
+ Start a previously set up exec instance.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ stream (bool): Stream response data. Default: False
+
+ Returns:
+ (generator or str): If ``stream=True``, a generator yielding
+ response chunks. A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
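+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``
+             and ``mycontainer`` is a running container:
+
+             >>> exec_id = cli.exec_create('mycontainer', 'ls /tmp')
+             >>> output = cli.exec_start(exec_id)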
+ """
# we want opened socket if socket == True
- if isinstance(exec_id, dict):
- exec_id = exec_id.get('Id')
data = {
'Tty': tty,
@@ -75,7 +149,8 @@ class ExecApiMixin(object):
data=data,
stream=True
)
-
+ if detach:
+ return self._result(res)
if socket:
return self._get_raw_response_socket(res)
return self._read_from_socket(res, stream)
diff --git a/docker/api/image.py b/docker/api/image.py
index 7f25f9d..181c4a1 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,31 +1,84 @@
import logging
import os
-import six
import warnings
-from ..auth import auth
+import six
+
+from .. import auth, errors, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from .. import utils
-from .. import errors
log = logging.getLogger(__name__)
class ImageApiMixin(object):
- @utils.check_resource
+ @utils.check_resource('image')
def get_image(self, image):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ image (str): Image name to get
+
+ Returns:
+ (urllib3.response.HTTPResponse object): The response from the
+ daemon.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.get_image("fedora:latest")
+             >>> f = open('/tmp/fedora-latest.tar', 'wb')
+ >>> f.write(image.data)
+ >>> f.close()
+ """
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
- @utils.check_resource
+ @utils.check_resource('image')
def history(self, image):
+ """
+ Show the history of an image.
+
+ Args:
+ image (str): The image to show history for
+
+ Returns:
+             (list): The history of the image, as a list of dicts
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
+ """
+ List images. Similar to the ``docker images`` command.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ quiet (bool): Only return numeric IDs as a list.
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+             (list): A list of dicts (one per image), or a list of image
+             IDs if ``quiet=True``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
@@ -45,6 +98,25 @@ class ImageApiMixin(object):
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
+ """
+ Import an image. Similar to the ``docker import`` command.
+
+ If ``src`` is a string or unicode string, it will first be treated as a
+ path to a tarball on the local system. If there is an error reading
+ from that file, ``src`` will be treated as a URL instead to fetch the
+ image from. You can also pass an open file handle as ``src``, in which
+ case the data will be read from that file.
+
+ If ``src`` is unset but ``image`` is set, the ``image`` parameter will
+ be taken as the name of an existing image to import from.
+
+ Args:
+ src (str or file): Path to tarfile, URL, or file-like object
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ image (str): Use another image like the ``FROM`` Dockerfile
+ parameter
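+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``
+             and a rootfs tarball exists at the (hypothetical) path shown:
+
+             >>> cli.import_image(
+             ...     src='/path/to/rootfs.tar', repository='myorg/myapp',
+             ...     tag='v1'
+             ... )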
+ """
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
@@ -78,6 +150,16 @@ class ImageApiMixin(object):
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
+ allows importing in-memory bytes data.
+
+ Args:
+ data (bytes collection): Bytes collection containing valid tar data
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
@@ -88,12 +170,22 @@ class ImageApiMixin(object):
u, data=data, params=params, headers=headers, timeout=None
)
)
- return self.import_image(
- src=data, repository=repository, tag=tag, changes=changes
- )
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a tar file on disk.
+
+ Args:
+ filename (str): Full path to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+
+ Raises:
+ IOError: File does not exist.
+ """
+
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
@@ -107,17 +199,36 @@ class ImageApiMixin(object):
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a URL.
+
+ Args:
+ url (str): A URL pointing to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from another image, like the ``FROM`` Dockerfile
+ parameter.
+
+ Args:
+ image (str): Image name to import from
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
- @utils.check_resource
+ @utils.check_resource('image')
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
@@ -130,18 +241,102 @@ class ImageApiMixin(object):
}
return self._result(self._post(api_url, params=params))
- @utils.check_resource
+ @utils.check_resource('image')
def inspect_image(self, image):
+ """
+         Get detailed information about an image. Similar to the ``docker
+         inspect`` command, but only for images.
+
+         Args:
+             image (str): The image to inspect
+
+ Returns:
+ (dict): Similar to the output of ``docker inspect``, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
def load_image(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
+ save``). Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+ """
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
+ @utils.minimum_version('1.25')
+ def prune_images(self, filters=None):
+ """
+ Delete unused images
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+ Available filters:
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
+
+ Returns:
+ (dict): A dict containing a list of deleted image IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
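+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``;
+             the key names follow the Engine API response:
+
+             >>> cli.prune_images(filters={'dangling': True})
+             {'ImagesDeleted': None, 'SpaceReclaimed': 0}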
+ """
+ url = self._url("/images/prune")
+ params = {}
+ if filters is not None:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._post(url, params=params), True)
+
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
+ """
+ Pulls an image. Similar to the ``docker pull`` command.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ stream (bool): Stream the output as a generator
+ insecure_registry (bool): Use an insecure registry
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+
+ Returns:
+ (generator or str): The output
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for line in cli.pull('busybox', stream=True):
+ ... print(json.dumps(json.loads(line), indent=4))
+ {
+ "status": "Pulling image (latest) from busybox",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+ {
+ "status": "Pulling image (latest) from busybox, endpoint: ...",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+
+ """
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
@@ -181,6 +376,38 @@ class ImageApiMixin(object):
def push(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
+ """
+ Push an image or a repository to the registry. Similar to the ``docker
+ push`` command.
+
+ Args:
+ repository (str): The repository to push to
+ tag (str): An optional tag to push
+ stream (bool): Stream the output as a blocking generator
+ insecure_registry (bool): Use ``http://`` to connect to the
+ registry
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+
+ Returns:
+ (generator or str): The output from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ >>> for line in cli.push('yourname/app', stream=True):
+             ...     print(line)
+ {"status":"Pushing repository yourname/app (1 tags)"}
+ {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
+ {"status":"Image already pushed, skipping","progressDetail":{},
+ "id":"511136ea3c5a"}
+ ...
+
+ """
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
@@ -216,20 +443,63 @@ class ImageApiMixin(object):
return self._result(response)
- @utils.check_resource
+ @utils.check_resource('image')
def remove_image(self, image, force=False, noprune=False):
+ """
+ Remove an image. Similar to the ``docker rmi`` command.
+
+ Args:
+ image (str): The image to remove
+ force (bool): Force removal of the image
+ noprune (bool): Do not delete untagged parents
+ """
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
self._raise_for_status(res)
def search(self, term):
+ """
+ Search for images on Docker Hub. Similar to the ``docker search``
+ command.
+
+ Args:
+ term (str): A term to search for.
+
+ Returns:
+ (list of dicts): The response of the search.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
)
- @utils.check_resource
+ @utils.check_resource('image')
def tag(self, image, repository, tag=None, force=False):
+ """
+ Tag an image into a repository. Similar to the ``docker tag`` command.
+
+ Args:
+ image (str): The image to tag
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+             force (bool): Force the tag, replacing an existing tag with the
+                 same name if necessary
+
+ Returns:
+ (bool): ``True`` if successful
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ force=True)
+ """
params = {
'tag': tag,
'repo': repository,
diff --git a/docker/api/network.py b/docker/api/network.py
index 0ee0dab..5ebb41a 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -1,21 +1,39 @@
-import json
-
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
+from .. import utils
class NetworkApiMixin(object):
@minimum_version('1.21')
- def networks(self, names=None, ids=None):
- filters = {}
+ def networks(self, names=None, ids=None, filters=None):
+ """
+         List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by
+ ids (:py:class:`list`): List of ids to filter by
+ filters (dict): Filters to be processed on the network list.
+ Available filters:
+ - ``driver=[<driver-name>]`` Matches a network's driver.
+ - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``type=["custom"|"builtin"]`` Filters networks by type.
+
+ Returns:
+             (list): List of network objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
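+
+         Example:
+             A minimal sketch, assuming ``cli`` is a connected ``APIClient``;
+             output abbreviated:
+
+             >>> cli.networks(filters={'driver': 'bridge'})
+             [{'Name': 'bridge', 'Driver': 'bridge', ...}]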
+ """
+
+ if filters is None:
+ filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
-
- params = {'filters': json.dumps(filters)}
-
+ params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
@@ -23,7 +41,57 @@ class NetworkApiMixin(object):
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
- enable_ipv6=False):
+ enable_ipv6=False, attachable=None, scope=None,
+ ingress=None):
+ """
+         Create a network. Similar to the ``docker network create``
+         command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+             check_duplicate (bool): Request daemon to check for networks with
+                 the same name. Default: ``True``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ attachable (bool): If enabled, and the network is in the global
+ scope, non-service containers on worker nodes will be able to
+ connect to the network.
+             scope (str): Specify the network's scope (``local``, ``global``
+                 or ``swarm``)
+             ingress (bool): If set, create an ingress network which provides
+                 the routing-mesh in swarm mode.
+
+ Returns:
+ (dict): The created network reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.create_network("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> docker_client.create_network("network1", driver="bridge",
+ ipam=ipam_config)
+ """
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@@ -32,7 +100,7 @@ class NetworkApiMixin(object):
'Driver': driver,
'Options': options,
'IPAM': ipam,
- 'CheckDuplicate': check_duplicate
+ 'CheckDuplicate': check_duplicate,
}
if labels is not None:
@@ -57,28 +125,106 @@ class NetworkApiMixin(object):
'supported in API version < 1.22')
data['Internal'] = True
+ if attachable is not None:
+ if version_lt(self._version, '1.24'):
+ raise InvalidVersion(
+ 'attachable is not supported in API version < 1.24'
+ )
+ data['Attachable'] = attachable
+
+ if ingress is not None:
+ if version_lt(self._version, '1.29'):
+ raise InvalidVersion(
+ 'ingress is not supported in API version < 1.29'
+ )
+
+ data['Ingress'] = ingress
+
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
+ @minimum_version('1.25')
+ def prune_networks(self, filters=None):
+ """
+ Delete unused networks
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+             (dict): A dict containing a list of deleted network names.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/networks/prune')
+ return self._result(self._post(url, params=params), True)
+
@minimum_version('1.21')
+ @check_resource('net_id')
def remove_network(self, net_id):
+ """
+ Remove a network. Similar to the ``docker network rm`` command.
+
+ Args:
+ net_id (str): The network's id
+ """
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@minimum_version('1.21')
- def inspect_network(self, net_id):
+ @check_resource('net_id')
+ def inspect_network(self, net_id, verbose=None):
+ """
+ Get detailed information about a network.
+
+ Args:
+ net_id (str): ID of network
+ verbose (bool): Show the service details across the cluster in
+ swarm mode.
+ """
+ params = {}
+ if verbose is not None:
+ if version_lt(self._version, '1.28'):
+ raise InvalidVersion('verbose was introduced in API 1.28')
+ params['verbose'] = verbose
+
url = self._url("/networks/{0}", net_id)
- res = self._get(url)
+ res = self._get(url, params=params)
return self._result(res, json=True)
- @check_resource
+     @check_resource('container')
@minimum_version('1.21')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
+ """
+ Connect a container to a network.
+
+ Args:
+ container (str): container-id/name to be connected to the network
+ net_id (str): network id
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local
+ (IPv4/IPv6) addresses.
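+
+        Example:
+
+            An illustrative call; the container and network names are
+            placeholders:
+
+            >>> client.connect_container_to_network(
+                container='web', net_id='network1',
+                aliases=['web.local'], ipv4_address='192.168.52.2')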
+ """
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
@@ -91,10 +237,20 @@ class NetworkApiMixin(object):
res = self._post_json(url, data=data)
self._raise_for_status(res)
- @check_resource
+    @check_resource('container')
@minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id,
force=False):
+ """
+ Disconnect a container from a network.
+
+ Args:
+ container (str): container ID or name to be disconnected from the
+ network
+ net_id (str): network ID
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+ """
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
new file mode 100644
index 0000000..87520cc
--- /dev/null
+++ b/docker/api/plugin.py
@@ -0,0 +1,251 @@
+import six
+
+from .. import auth, utils
+
+
+class PluginApiMixin(object):
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def configure_plugin(self, name, options):
+ """
+ Configure a plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ options (dict): A key-value mapping of options
+
+ Returns:
+ ``True`` if successful
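+
+        Example:
+
+            Illustrative only; the plugin name and option are placeholders:
+
+            >>> client.configure_plugin('vieux/sshfs:latest',
+                {'DEBUG': '1'})
+            True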
+ """
+ url = self._url('/plugins/{0}/set', name)
+ data = options
+ if isinstance(data, dict):
+ data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def create_plugin(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/create')
+
+ with utils.create_archive(root=plugin_data_dir, gzip=gzip) as archv:
+ res = self._post(url, params={'name': name}, data=archv)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def disable_plugin(self, name):
+ """
+ Disable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/disable', name)
+ res = self._post(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def enable_plugin(self, name, timeout=0):
+ """
+ Enable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ timeout (int): Operation timeout (in seconds). Default: 0
+
+ Returns:
+ ``True`` if successful
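+
+        Example:
+
+            Illustrative; assumes the plugin has already been installed:
+
+            >>> client.enable_plugin('vieux/sshfs:latest', timeout=30)
+            True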
+ """
+ url = self._url('/plugins/{0}/enable', name)
+ params = {'timeout': timeout}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def inspect_plugin(self, name):
+ """
+ Retrieve plugin metadata.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ A dict containing plugin info
+ """
+ url = self._url('/plugins/{0}/json', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def pull_plugin(self, remote, privileges, name=None):
+ """
+ Pull and install a plugin. After the plugin is installed, it can be
+ enabled using :py:meth:`~enable_plugin`.
+
+ Args:
+ remote (string): Remote reference for the plugin to install.
+ The ``:latest`` tag is optional, and is the default if
+ omitted.
+ privileges (list): A list of privileges the user consents to
+ grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+ name (string): Local name for the pulled plugin. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ An iterable object streaming the decoded API logs
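+
+        Example:
+
+            A typical flow (the plugin name is illustrative); privileges
+            are fetched first so they can be reviewed before granting:
+
+            >>> privileges = client.plugin_privileges('vieux/sshfs:latest')
+            >>> for status in client.pull_plugin('vieux/sshfs:latest',
+                privileges):
+                print(status)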
+ """
+ url = self._url('/plugins/pull')
+ params = {
+ 'remote': remote,
+ }
+ if name:
+ params['name'] = name
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
+
+ @utils.minimum_version('1.25')
+ def plugins(self):
+ """
+ Retrieve a list of installed plugins.
+
+ Returns:
+ A list of dicts, one per plugin
+ """
+ url = self._url('/plugins')
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def plugin_privileges(self, name):
+ """
+ Retrieve list of privileges to be granted to a plugin.
+
+ Args:
+ name (string): Name of the remote plugin to examine. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ A list of dictionaries representing the plugin's
+ permissions
+
+ """
+ params = {
+ 'remote': name,
+ }
+
+ url = self._url('/plugins/privileges')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def push_plugin(self, name):
+ """
+ Push a plugin to the registry.
+
+ Args:
+ name (string): Name of the plugin to upload. The ``:latest``
+ tag is optional, and is the default if omitted.
+
+        Returns:
+            An iterable object streaming the decoded API logs
+ """
+        url = self._url('/plugins/{0}/push', name)
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ res = self._post(url, headers=headers)
+ self._raise_for_status(res)
+ return self._stream_helper(res, decode=True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('name')
+ def remove_plugin(self, name, force=False):
+ """
+ Remove an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to remove. The ``:latest``
+ tag is optional, and is the default if omitted.
+ force (bool): Disable the plugin before removing. This may
+ result in issues if the plugin is in use by a container.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}', name)
+ res = self._delete(url, params={'force': force})
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.26')
+ @utils.check_resource('name')
+ def upgrade_plugin(self, name, remote, privileges):
+ """
+ Upgrade an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to upgrade. The ``:latest``
+ tag is optional and is the default if omitted.
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ privileges (list): A list of privileges the user consents to
+ grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+
+ url = self._url('/plugins/{0}/upgrade', name)
+ params = {
+ 'remote': remote,
+ }
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
diff --git a/docker/api/secret.py b/docker/api/secret.py
new file mode 100644
index 0000000..1760a39
--- /dev/null
+++ b/docker/api/secret.py
@@ -0,0 +1,91 @@
+import base64
+
+import six
+
+from .. import utils
+
+
+class SecretApiMixin(object):
+ @utils.minimum_version('1.25')
+ def create_secret(self, name, data, labels=None):
+ """
+ Create a secret
+
+ Args:
+ name (string): Name of the secret
+ data (bytes): Secret data to be stored
+ labels (dict): A mapping of labels to assign to the secret
+
+ Returns (dict): ID of the newly created secret
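+
+        Example:
+
+            Illustrative values; the returned ID is generated by the
+            engine:
+
+            >>> client.create_secret(name='db_password', data=b'hunter2',
+                labels={'app': 'web'})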
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ url = self._url('/secrets/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def inspect_secret(self, id):
+ """
+ Retrieve secret metadata
+
+ Args:
+            id (string): Full ID of the secret to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource('id')
+ def remove_secret(self, id):
+ """
+ Remove a secret
+
+ Args:
+ id (string): Full ID of the secret to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def secrets(self, filters=None):
+ """
+ List secrets
+
+ Args:
+ filters (dict): A map of filters to process on the secrets
+ list. Available filters: ``names``
+
+ Returns (list): A list of secrets
+ """
+ url = self._url('/secrets')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/docker/api/service.py b/docker/api/service.py
index baebbad..cc16cc3 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -1,14 +1,92 @@
-from .. import errors
-from .. import utils
-from ..auth import auth
+import warnings
+from .. import auth, errors, utils
+from ..types import ServiceMode
+
+
+def _check_api_features(version, task_template, update_config):
+ if update_config is not None:
+ if utils.version_lt(version, '1.25'):
+ if 'MaxFailureRatio' in update_config:
+ raise errors.InvalidVersion(
+ 'UpdateConfig.max_failure_ratio is not supported in'
+ ' API version < 1.25'
+ )
+ if 'Monitor' in update_config:
+ raise errors.InvalidVersion(
+ 'UpdateConfig.monitor is not supported in'
+ ' API version < 1.25'
+ )
+
+ if task_template is not None:
+ if 'ForceUpdate' in task_template and utils.version_lt(
+ version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force_update is not supported in API version < 1.25'
+ )
+
+ if task_template.get('Placement'):
+ if utils.version_lt(version, '1.30'):
+ if task_template['Placement'].get('Platforms'):
+ raise errors.InvalidVersion(
+ 'Placement.platforms is not supported in'
+ ' API version < 1.30'
+ )
+
+ if utils.version_lt(version, '1.27'):
+ if task_template['Placement'].get('Preferences'):
+ raise errors.InvalidVersion(
+ 'Placement.preferences is not supported in'
+ ' API version < 1.27'
+ )
+        if task_template.get('ContainerSpec', {}).get('TTY'):
+ if utils.version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'ContainerSpec.TTY is not supported in API version < 1.25'
+ )
class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
- update_config=None, networks=None, endpoint_config=None
+ update_config=None, networks=None, endpoint_config=None,
+ endpoint_spec=None
):
+ """
+ Create a service.
+
+ Args:
+ task_template (TaskTemplate): Specification of the task to start as
+ part of the new service.
+ name (string): User-defined name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+ A dictionary containing an ``ID`` key for the newly created
+ service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
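+
+        Example:
+
+            A minimal sketch using the ``docker.types`` helpers; image and
+            service name are illustrative:
+
+            >>> container_spec = docker.types.ContainerSpec(
+                image='busybox', command=['sleep', '60'])
+            >>> task_template = docker.types.TaskTemplate(container_spec)
+            >>> client.create_service(task_template, name='sleepy')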
+ """
+ if endpoint_config is not None:
+ warnings.warn(
+ 'endpoint_config has been renamed to endpoint_spec.',
+ DeprecationWarning
+ )
+ endpoint_spec = endpoint_config
+
+ _check_api_features(self._version, task_template, update_config)
+
url = self._url('/services/create')
headers = {}
image = task_template.get('ContainerSpec', {}).get('Image', None)
@@ -16,6 +94,9 @@ class ServiceApiMixin(object):
raise errors.DockerException(
'Missing mandatory Image key in ContainerSpec'
)
+ if mode and not isinstance(mode, dict):
+ mode = ServiceMode(mode)
+
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
@@ -25,29 +106,72 @@ class ServiceApiMixin(object):
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
- 'UpdateConfig': update_config,
- 'Networks': networks,
- 'Endpoint': endpoint_config
+ 'Networks': utils.convert_service_networks(networks),
+ 'EndpointSpec': endpoint_spec
}
+
+ if update_config is not None:
+ data['UpdateConfig'] = update_config
+
return self._result(
self._post_json(url, data=data, headers=headers), True
)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('service')
def inspect_service(self, service):
+ """
+ Return information about a service.
+
+ Args:
+ service (str): Service name or ID
+
+        Returns:
+            (dict): Information about the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/services/{0}', service)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('task')
def inspect_task(self, task):
+ """
+ Retrieve information about a task.
+
+ Args:
+ task (str): Task ID
+
+ Returns:
+ (dict): Information about the task.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('service')
def remove_service(self, service):
+ """
+ Stop and remove a service.
+
+ Args:
+ service (str): Service name or ID
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
@@ -55,14 +179,94 @@ class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def services(self, filters=None):
+ """
+ List services.
+
+ Args:
+            filters (dict): Filters to process on the services list. Valid
+ filters: ``id`` and ``name``. Default: ``None``.
+
+ Returns:
+ A list of dictionaries containing data about each service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/services')
return self._result(self._get(url, params=params), True)
+ @utils.minimum_version('1.25')
+ @utils.check_resource('service')
+ def service_logs(self, service, details=False, follow=False, stdout=False,
+ stderr=False, since=0, timestamps=False, tail='all',
+ is_tty=None):
+ """
+ Get log stream for a service.
+ Note: This endpoint works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ service (str): ID or name of the service
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+            since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+ is_tty (bool): Whether the service's :py:class:`ContainerSpec`
+ enables the TTY option. If omitted, the method will query
+ the Engine for the information, causing an additional
+ roundtrip.
+
+ Returns (generator): Logs for the service.
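+
+        Example:
+
+            Illustrative; streams log lines until interrupted:
+
+            >>> for line in client.service_logs('my_service', stdout=True,
+                stderr=True, follow=True):
+                print(line)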
+ """
+ params = {
+ 'details': details,
+ 'follow': follow,
+ 'stdout': stdout,
+ 'stderr': stderr,
+ 'since': since,
+ 'timestamps': timestamps,
+ 'tail': tail
+ }
+
+ url = self._url('/services/{0}/logs', service)
+ res = self._get(url, params=params, stream=True)
+ if is_tty is None:
+ is_tty = self.inspect_service(
+ service
+ )['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
+ return self._get_result_tty(True, res, is_tty)
+
@utils.minimum_version('1.24')
def tasks(self, filters=None):
+ """
+ Retrieve a list of tasks.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``service``, ``node``,
+ ``label`` and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
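+
+        Example:
+
+            Illustrative; list only the tasks of a single service:
+
+            >>> client.tasks(filters={'service': 'my_service'})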
+ """
+
params = {
'filters': utils.convert_filters(filters) if filters else None
}
@@ -70,10 +274,49 @@ class ServiceApiMixin(object):
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
- @utils.check_resource
+ @utils.check_resource('service')
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
- networks=None, endpoint_config=None):
+ networks=None, endpoint_config=None,
+ endpoint_spec=None):
+ """
+ Update a service.
+
+ Args:
+ service (string): A service identifier (either its name or service
+ ID).
+ version (int): The version number of the service object being
+ updated. This is required to avoid conflicting writes.
+ task_template (TaskTemplate): Specification of the updated task to
+ start as part of the service.
+ name (string): New name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (ServiceMode): Scheduling mode for the service (replicated
+ or global). Defaults to replicated.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``.
+ networks (:py:class:`list`): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
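+
+        Example:
+
+            A minimal sketch; the current version index is read from the
+            service before updating:
+
+            >>> svc = client.inspect_service('my_service')
+            >>> client.update_service('my_service',
+                svc['Version']['Index'], labels={'tier': 'web'})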
+ """
+ if endpoint_config is not None:
+ warnings.warn(
+ 'endpoint_config has been renamed to endpoint_spec.',
+ DeprecationWarning
+ )
+ endpoint_spec = endpoint_config
+
+ _check_api_features(self._version, task_template, update_config)
+
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
@@ -82,6 +325,8 @@ class ServiceApiMixin(object):
if labels is not None:
data['Labels'] = labels
if mode is not None:
+ if not isinstance(mode, dict):
+ mode = ServiceMode(mode)
data['Mode'] = mode
if task_template is not None:
image = task_template.get('ContainerSpec', {}).get('Image', None)
@@ -93,10 +338,11 @@ class ServiceApiMixin(object):
data['TaskTemplate'] = task_template
if update_config is not None:
data['UpdateConfig'] = update_config
+
if networks is not None:
- data['Networks'] = networks
- if endpoint_config is not None:
- data['Endpoint'] = endpoint_config
+ data['Networks'] = utils.convert_service_networks(networks)
+ if endpoint_spec is not None:
+ data['EndpointSpec'] = endpoint_spec
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index d099364..4fa0c4a 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,16 +1,94 @@
-from .. import utils
import logging
+from six.moves import http_client
+from .. import types
+from .. import utils
log = logging.getLogger(__name__)
class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
- return utils.SwarmSpec(*args, **kwargs)
+ """
+ Create a ``docker.types.SwarmSpec`` instance that can be used as the
+ ``swarm_spec`` argument in
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
+
+ Args:
+ task_history_retention_limit (int): Maximum number of tasks
+ history stored.
+            snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+            heartbeat_tick (int): Number of ticks (in seconds) between each
+                heartbeat.
+            election_tick (int): Number of ticks (in seconds) needed without a
+                leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+            node_cert_expiry (int): Automatic expiry for node certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+
+ Returns:
+ ``docker.types.SwarmSpec`` instance.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> spec = client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ >>> client.init_swarm(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, swarm_spec=spec
+ )
+ """
+ return types.SwarmSpec(*args, **kwargs)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None):
+ """
+ Initialize a new Swarm using the current connected engine as the first
+ node.
+
+ Args:
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: None
+ listen_addr (string): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: '0.0.0.0:2377'
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ swarm_spec (dict): Configuration settings of the new Swarm. Use
+ ``APIClient.create_swarm_spec`` to generate a valid
+ configuration. Default: None
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
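+
+        Example:
+
+            Illustrative; everything but the advertised address is left at
+            its default:
+
+            >>> client.init_swarm(advertise_addr='eth0')
+            True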
+ """
+
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
@@ -26,18 +104,67 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def inspect_swarm(self):
+ """
+ Retrieve low-level information about the current swarm.
+
+ Returns:
+ A dictionary containing data about the swarm.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/swarm')
return self._result(self._get(url), True)
- @utils.check_resource
+ @utils.check_resource('node_id')
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
+ """
+ Retrieve low-level information about a swarm node
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A dictionary containing data about this node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr=None,
advertise_addr=None):
+ """
+ Make this Engine join a swarm that has already been created.
+
+ Args:
+ remote_addrs (:py:class:`list`): Addresses of one or more manager
+ nodes already participating in the Swarm to join.
+ join_token (string): Secret token for joining this Swarm.
+ listen_addr (string): Listen address used for inter-manager
+ communication if the node gets promoted to manager, as well as
+ determining the networking interface used for the VXLAN Tunnel
+ Endpoint (VTEP). Default: ``None``
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+                ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: ``None``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
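+
+        Example:
+
+            Illustrative; the manager address and join token are
+            placeholders:
+
+            >>> client.join_swarm(
+                remote_addrs=['192.168.14.221:2377'],
+                join_token='SWMTKN-1-<token>')
+            True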
+ """
data = {
"RemoteAddrs": remote_addrs,
"ListenAddr": listen_addr,
@@ -51,13 +178,49 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
+ """
+ Leave a swarm.
+
+ Args:
+ force (bool): Leave the swarm even if this node is a manager.
+ Default: ``False``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
+ # Ignore "this node is not part of a swarm" error
+ if force and response.status_code == http_client.NOT_ACCEPTABLE:
+ return True
+ # FIXME: Temporary workaround for 1.13.0-rc bug
+ # https://github.com/docker/docker/issues/29192
+ if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
+ return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of dictionaries containing data about each swarm node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/nodes')
params = {}
if filters:
@@ -65,9 +228,94 @@ class SwarmApiMixin(object):
return self._result(self._get(url, params=params), True)
+ @utils.check_resource('node_id')
+ @utils.minimum_version('1.24')
+ def remove_node(self, node_id, force=False):
+ """
+ Remove a node from the swarm.
+
+ Args:
+ node_id (string): ID of the node to be removed.
+            force (bool): Force remove an active node. Default: ``False``
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node referenced doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ Returns:
+            ``True`` if the request was successful.
+ """
+ url = self._url('/nodes/{0}', node_id)
+ params = {
+ 'force': force
+ }
+ res = self._delete(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.24')
+ def update_node(self, node_id, version, node_spec=None):
+ """
+ Update the Node's configuration
+
+ Args:
+ node_id (string): ID of the node to be updated.
+ version (int): The version number of the node object being
+ updated. This is required to avoid conflicting writes.
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+            ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ node_spec=node_spec)
+
+ """
+ url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
+ res = self._post_json(url, data=node_spec)
+ self._raise_for_status(res)
+ return True
+
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_manager_token=False):
+ """
+ Update the Swarm's configuration
+
+ Args:
+ version (int): The version number of the swarm object being
+ updated. This is required to avoid conflicting writes.
+ swarm_spec (dict): Configuration settings to update. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
+ generate a valid configuration. Default: ``None``.
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
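+
+        Example:
+
+            A minimal sketch; the version index is read from the current
+            swarm state:
+
+            >>> swarm = client.inspect_swarm()
+            >>> client.update_swarm(version=swarm['Version']['Index'],
+                rotate_worker_token=True)
+            True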
+ """
+
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token,
diff --git a/docker/api/volume.py b/docker/api/volume.py
index afc72cb..ce911c8 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -5,6 +5,32 @@ from .. import utils
class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def volumes(self, filters=None):
+ """
+ List volumes currently registered by the docker daemon. Similar to the
+ ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (dict): Dictionary with list of volume objects as value of the
+ ``Volumes`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.volumes()
+ {u'Volumes': [{u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'},
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
+ u'Name': u'baz'}]}
+ """
+
params = {
'filters': utils.convert_filters(filters) if filters else None
}
@@ -12,7 +38,37 @@ class VolumeApiMixin(object):
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.21')
- def create_volume(self, name, driver=None, driver_opts=None, labels=None):
+ def create_volume(self, name=None, driver=None, driver_opts=None,
+ labels=None):
+ """
+ Create and register a named volume
+
+ Args:
+ name (str): Name of the volume
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (dict): The created volume reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = cli.create_volume(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+ >>> print(volume)
+ {u'Driver': u'local',
+ u'Labels': {u'key': u'value'},
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar',
+ u'Scope': u'local'}
+
+ """
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
@@ -36,11 +92,74 @@ class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def inspect_volume(self, name):
+ """
+ Retrieve volume info by name.
+
+ Args:
+ name (str): volume name
+
+ Returns:
+ (dict): Volume information dictionary
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.inspect_volume('foobar')
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'}
+
+ """
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
+ @utils.minimum_version('1.25')
+ def prune_volumes(self, filters=None):
+ """
+ Delete unused volumes
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted volume names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
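+
+        Example:
+
+            Illustrative; the deleted names and byte count depend on the
+            host:
+
+            >>> client.prune_volumes()
+            {'VolumesDeleted': ['foobar'], 'SpaceReclaimed': 1024}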
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/volumes/prune')
+ return self._result(self._post(url, params=params), True)
+
@utils.minimum_version('1.21')
- def remove_volume(self, name):
- url = self._url('/volumes/{0}', name)
+ def remove_volume(self, name, force=False):
+ """
+ Remove a volume. Similar to the ``docker volume rm`` command.
+
+ Args:
+ name (str): The volume's name
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
+ """
+ params = {}
+ if force:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force removal was introduced in API 1.25'
+ )
+ params = {'force': force}
+
+        url = self._url('/volumes/{0}', name)
-        resp = self._delete(url)
+        resp = self._delete(url, params=params)
self._raise_for_status(resp)
diff --git a/docker/auth/auth.py b/docker/auth.py
index dc0baea..ec9c45b 100644
--- a/docker/auth/auth.py
+++ b/docker/auth.py
@@ -6,7 +6,8 @@ import os
import dockerpycreds
import six
-from .. import errors
+from . import errors
+from .constants import IS_WINDOWS_PLATFORM
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
@@ -69,6 +70,15 @@ def split_repo_name(repo_name):
return tuple(parts)
+def get_credential_store(authconfig, registry):
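+    # A registry-specific entry in 'credHelpers' takes precedence over the
+    # global 'credsStore', mirroring the Docker CLI's lookup order.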
+ if not registry or registry == INDEX_NAME:
+ registry = 'https://index.docker.io/v1/'
+
+ return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
+ 'credsStore'
+ )
+
+
def resolve_authconfig(authconfig, registry=None):
"""
Returns the authentication data from the given auth configuration for a
@@ -76,13 +86,17 @@ def resolve_authconfig(authconfig, registry=None):
with full URLs are stripped down to hostnames before checking for a match.
Returns None if no match was found.
"""
- if 'credsStore' in authconfig:
- log.debug(
- 'Using credentials store "{0}"'.format(authconfig['credsStore'])
- )
- return _resolve_authconfig_credstore(
- authconfig, registry, authconfig['credsStore']
- )
+
+ if 'credHelpers' in authconfig or 'credsStore' in authconfig:
+ store_name = get_credential_store(authconfig, registry)
+ if store_name is not None:
+ log.debug(
+ 'Using credentials store "{0}"'.format(store_name)
+ )
+ return _resolve_authconfig_credstore(
+ authconfig, registry, store_name
+ )
+
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
@@ -210,19 +224,12 @@ def parse_auth(entries, raise_on_error=False):
def find_config_file(config_path=None):
- environment_path = os.path.join(
- os.environ.get('DOCKER_CONFIG'),
- os.path.basename(DOCKER_CONFIG_FILENAME)
- ) if os.environ.get('DOCKER_CONFIG') else None
-
- paths = filter(None, [
+ paths = list(filter(None, [
config_path, # 1
- environment_path, # 2
- os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
- os.path.join(
- os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
- ) # 4
- ])
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
log.debug("Trying paths: {0}".format(repr(paths)))
@@ -236,6 +243,24 @@ def find_config_file(config_path=None):
return None
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+ """
+    Get the user's home directory, using the same logic as the Docker Engine
+    client - ``%USERPROFILE%`` on Windows, ``$HOME`` (falling back to the
+    password database) on POSIX.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
def load_config(config_path=None):
"""
Loads authentication data from a Docker configuration file in the given
@@ -262,6 +287,9 @@ def load_config(config_path=None):
if data.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': data['credsStore']})
+ if data.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': data['credHelpers']})
if res:
return res
else:
diff --git a/docker/auth/__init__.py b/docker/auth/__init__.py
deleted file mode 100644
index 6fc83f8..0000000
--- a/docker/auth/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .auth import (
- INDEX_NAME,
- INDEX_URL,
- encode_header,
- load_config,
- resolve_authconfig,
- resolve_repository_name,
-) # flake8: noqa \ No newline at end of file
diff --git a/docker/client.py b/docker/client.py
index 3fa19e0..ee361bb 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,406 +1,190 @@
-import json
-import struct
-from functools import partial
-
-import requests
-import requests.exceptions
-import six
-import websocket
-
-
-from . import api
-from . import constants
-from . import errors
-from .auth import auth
-from .ssladapter import ssladapter
-from .tls import TLSConfig
-from .transport import UnixAdapter
-from .utils import utils, check_resource, update_headers, kwargs_from_env
-from .utils.socket import frames_iter
-try:
- from .transport import NpipeAdapter
-except ImportError:
- pass
-
-
-def from_env(**kwargs):
- return Client.from_env(**kwargs)
-
-
-class Client(
- requests.Session,
- api.BuildApiMixin,
- api.ContainerApiMixin,
- api.DaemonApiMixin,
- api.ExecApiMixin,
- api.ImageApiMixin,
- api.NetworkApiMixin,
- api.ServiceApiMixin,
- api.SwarmApiMixin,
- api.VolumeApiMixin):
- def __init__(self, base_url=None, version=None,
- timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=constants.DEFAULT_USER_AGENT,
- num_pools=constants.DEFAULT_NUM_POOLS):
- super(Client, self).__init__()
-
- if tls and not base_url:
- raise errors.TLSParameterError(
- 'If using TLS, the base_url argument must be provided.'
- )
-
- self.base_url = base_url
- self.timeout = timeout
- self.headers['User-Agent'] = user_agent
-
- self._auth_configs = auth.load_config()
-
- base_url = utils.parse_host(
- base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
- )
- if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(
- base_url, timeout, num_pools=num_pools
- )
- self.mount('http+docker://', self._custom_adapter)
- self._unmount('http://', 'https://')
- self.base_url = 'http+docker://localunixsocket'
- elif base_url.startswith('npipe://'):
- if not constants.IS_WINDOWS_PLATFORM:
- raise errors.DockerException(
- 'The npipe:// protocol is only supported on Windows'
- )
- try:
- self._custom_adapter = NpipeAdapter(
- base_url, timeout, num_pools=num_pools
- )
- except NameError:
- raise errors.DockerException(
- 'Install pypiwin32 package to enable npipe:// support'
- )
- self.mount('http+docker://', self._custom_adapter)
- self.base_url = 'http+docker://localnpipe'
- else:
- # Use SSLAdapter for the ability to specify SSL version
- if isinstance(tls, TLSConfig):
- tls.configure_client(self)
- elif tls:
- self._custom_adapter = ssladapter.SSLAdapter(
- pool_connections=num_pools
- )
- self.mount('https://', self._custom_adapter)
- self.base_url = base_url
-
- # version detection needs to be after unix adapter mounting
- if version is None:
- self._version = constants.DEFAULT_DOCKER_API_VERSION
- elif isinstance(version, six.string_types):
- if version.lower() == 'auto':
- self._version = self._retrieve_server_version()
- else:
- self._version = version
- else:
- raise errors.DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
- type(version).__name__
- )
- )
+from .api.client import APIClient
+from .constants import DEFAULT_TIMEOUT_SECONDS
+from .models.containers import ContainerCollection
+from .models.images import ImageCollection
+from .models.networks import NetworkCollection
+from .models.nodes import NodeCollection
+from .models.plugins import PluginCollection
+from .models.secrets import SecretCollection
+from .models.services import ServiceCollection
+from .models.swarm import Swarm
+from .models.volumes import VolumeCollection
+from .utils import kwargs_from_env
+
+
+class DockerClient(object):
+ """
+ A client for communicating with a Docker server.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.26``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ """
+ def __init__(self, *args, **kwargs):
+ self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
+ """
+ Return a client configured from environment variables.
+
+ The environment variables used are the same as those used by the
+ Docker command-line client. They are:
+
+ .. envvar:: DOCKER_HOST
+
+ The URL to the Docker host.
+
+ .. envvar:: DOCKER_TLS_VERIFY
+
+ Verify the host against a CA certificate.
+
+ .. envvar:: DOCKER_CERT_PATH
+
+ A path to a directory containing TLS certificates to use when
+ connecting to the Docker host.
+
+ Args:
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.26``
+ timeout (int): Default timeout for API calls, in seconds.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+ environment (dict): The environment to read environment variables
+ from. Default: the value of ``os.environ``
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.from_env()
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
+ timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
version = kwargs.pop('version', None)
- return cls(version=version, **kwargs_from_env(**kwargs))
-
- def _retrieve_server_version(self):
- try:
- return self.version(api_version=False)["ApiVersion"]
- except KeyError:
- raise errors.DockerException(
- 'Invalid response from docker daemon: key "ApiVersion"'
- ' is missing.'
- )
- except Exception as e:
- raise errors.DockerException(
- 'Error while fetching server API version: {0}'.format(e)
- )
-
- def _set_request_timeout(self, kwargs):
- """Prepare the kwargs for an HTTP request by inserting the timeout
- parameter, if not already present."""
- kwargs.setdefault('timeout', self.timeout)
- return kwargs
-
- @update_headers
- def _post(self, url, **kwargs):
- return self.post(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _get(self, url, **kwargs):
- return self.get(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _put(self, url, **kwargs):
- return self.put(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _delete(self, url, **kwargs):
- return self.delete(url, **self._set_request_timeout(kwargs))
-
- def _url(self, pathfmt, *args, **kwargs):
- for arg in args:
- if not isinstance(arg, six.string_types):
- raise ValueError(
- 'Expected a string but found {0} ({1}) '
- 'instead'.format(arg, type(arg))
- )
-
- quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
- args = map(quote_f, args)
-
- if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
- self.base_url, self._version, pathfmt.format(*args)
- )
- else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
-
- def _raise_for_status(self, response, explanation=None):
- """Raises stored :class:`APIError`, if one occurred."""
- try:
- response.raise_for_status()
- except requests.exceptions.HTTPError as e:
- if e.response.status_code == 404:
- raise errors.NotFound(e, response, explanation=explanation)
- raise errors.APIError(e, response, explanation=explanation)
-
- def _result(self, response, json=False, binary=False):
- assert not (json and binary)
- self._raise_for_status(response)
-
- if json:
- return response.json()
- if binary:
- return response.content
- return response.text
-
- def _post_json(self, url, data, **kwargs):
- # Go <1.1 can't unserialize null to a string
- # so we do this disgusting thing here.
- data2 = {}
- if data is not None:
- for k, v in six.iteritems(data):
- if v is not None:
- data2[k] = v
-
- if 'headers' not in kwargs:
- kwargs['headers'] = {}
- kwargs['headers']['Content-Type'] = 'application/json'
- return self._post(url, data=json.dumps(data2), **kwargs)
-
- def _attach_params(self, override=None):
- return override or {
- 'stdout': 1,
- 'stderr': 1,
- 'stream': 1
- }
-
- @check_resource
- def _attach_websocket(self, container, params=None):
- url = self._url("/containers/{0}/attach/ws", container)
- req = requests.Request("POST", url, params=self._attach_params(params))
- full_url = req.prepare().url
- full_url = full_url.replace("http://", "ws://", 1)
- full_url = full_url.replace("https://", "wss://", 1)
- return self._create_websocket_connection(full_url)
-
- def _create_websocket_connection(self, url):
- return websocket.create_connection(url)
-
- def _get_raw_response_socket(self, response):
- self._raise_for_status(response)
- if self.base_url == "http+docker://localnpipe":
- sock = response.raw._fp.fp.raw.sock
- elif six.PY3:
- sock = response.raw._fp.fp.raw
- if self.base_url.startswith("https://"):
- sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
- try:
- # Keep a reference to the response to stop it being garbage
- # collected. If the response is garbage collected, it will
- # close TLS sockets.
- sock._response = response
- except AttributeError:
- # UNIX sockets can't have attributes set on them, but that's
- # fine because we won't be doing TLS over them
- pass
-
- return sock
-
- def _stream_helper(self, response, decode=False):
- """Generator for data coming from a chunked-encoded HTTP response."""
- if response.raw._fp.chunked:
- reader = response.raw
- while not reader.closed:
- # this read call will block until we get a chunk
- data = reader.read(1)
- if not data:
- break
- if reader._fp.chunk_left:
- data += reader.read(reader._fp.chunk_left)
- if decode:
- if six.PY3:
- data = data.decode('utf-8')
- # remove the trailing newline
- data = data.strip()
- # split the data at any newlines
- data_list = data.split("\r\n")
- # load and yield each line seperately
- for data in data_list:
- data = json.loads(data)
- yield data
- else:
- yield data
- else:
- # Response isn't chunked, meaning we probably
- # encountered an error immediately
- yield self._result(response, json=decode)
-
- def _multiplexed_buffer_helper(self, response):
- """A generator of multiplexed data blocks read from a buffered
- response."""
- buf = self._result(response, binary=True)
- walker = 0
- while True:
- if len(buf[walker:]) < 8:
- break
- _, length = struct.unpack_from('>BxxxL', buf[walker:])
- start = walker + constants.STREAM_HEADER_SIZE_BYTES
- end = start + length
- walker = end
- yield buf[start:end]
-
- def _multiplexed_response_stream_helper(self, response):
- """A generator of multiplexed data blocks coming from a response
- stream."""
-
- # Disable timeout on the underlying socket to prevent
- # Read timed out(s) for long running processes
- socket = self._get_raw_response_socket(response)
- self._disable_socket_timeout(socket)
-
- while True:
- header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
- if not header:
- break
- _, length = struct.unpack('>BxxxL', header)
- if not length:
- continue
- data = response.raw.read(length)
- if not data:
- break
- yield data
-
- def _stream_raw_result_old(self, response):
- ''' Stream raw output for API versions below 1.6 '''
- self._raise_for_status(response)
- for line in response.iter_lines(chunk_size=1,
- decode_unicode=True):
- # filter out keep-alive new lines
- if line:
- yield line
-
- def _stream_raw_result(self, response):
- ''' Stream result for TTY-enabled container above API 1.6 '''
- self._raise_for_status(response)
- for out in response.iter_content(chunk_size=1, decode_unicode=True):
- yield out
-
- def _read_from_socket(self, response, stream):
- socket = self._get_raw_response_socket(response)
-
- if stream:
- return frames_iter(socket)
- else:
- return six.binary_type().join(frames_iter(socket))
-
- def _disable_socket_timeout(self, socket):
- """ Depending on the combination of python version and whether we're
- connecting over http or https, we might need to access _sock, which
- may or may not exist; or we may need to just settimeout on socket
- itself, which also may or may not have settimeout on it. To avoid
- missing the correct one, we try both.
-
- We also do not want to set the timeout if it is already disabled, as
- you run the risk of changing a socket that was non-blocking to
- blocking, for example when using gevent.
+ return cls(timeout=timeout, version=version,
+ **kwargs_from_env(**kwargs))
+
+ # Resources
+ @property
+ def containers(self):
+ """
+ An object for managing containers on the server. See the
+ :doc:`containers documentation <containers>` for full details.
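+
+        Example (illustrative):
+
+            >>> client.containers.run('alpine', 'echo hello')
+            b'hello\n'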
+ """
+ return ContainerCollection(client=self)
+
+ @property
+ def images(self):
+ """
+ An object for managing images on the server. See the
+ :doc:`images documentation <images>` for full details.
"""
- sockets = [socket, getattr(socket, '_sock', None)]
-
- for s in sockets:
- if not hasattr(s, 'settimeout'):
- continue
-
- timeout = -1
-
- if hasattr(s, 'gettimeout'):
- timeout = s.gettimeout()
-
- # Don't change the timeout if it is already disabled.
- if timeout is None or timeout == 0.0:
- continue
-
- s.settimeout(None)
-
- def _get_result(self, container, stream, res):
- cont = self.inspect_container(container)
- return self._get_result_tty(stream, res, cont['Config']['Tty'])
-
- def _get_result_tty(self, stream, res, is_tty):
- # Stream multi-plexing was only introduced in API v1.6. Anything
- # before that needs old-style streaming.
- if utils.compare_version('1.6', self._version) < 0:
- return self._stream_raw_result_old(res)
-
- # We should also use raw streaming (without keep-alives)
- # if we're dealing with a tty-enabled container.
- if is_tty:
- return self._stream_raw_result(res) if stream else \
- self._result(res, binary=True)
-
- self._raise_for_status(res)
- sep = six.binary_type()
- if stream:
- return self._multiplexed_response_stream_helper(res)
- else:
- return sep.join(
- [x for x in self._multiplexed_buffer_helper(res)]
- )
-
- def _unmount(self, *args):
- for proto in args:
- self.adapters.pop(proto)
-
- def get_adapter(self, url):
- try:
- return super(Client, self).get_adapter(url)
- except requests.exceptions.InvalidSchema as e:
- if self._custom_adapter:
- return self._custom_adapter
- else:
- raise e
+ return ImageCollection(client=self)
@property
- def api_version(self):
- return self._version
+ def networks(self):
+ """
+ An object for managing networks on the server. See the
+ :doc:`networks documentation <networks>` for full details.
+ """
+ return NetworkCollection(client=self)
+ @property
+ def nodes(self):
+ """
+ An object for managing nodes on the server. See the
+ :doc:`nodes documentation <nodes>` for full details.
+ """
+ return NodeCollection(client=self)
-class AutoVersionClient(Client):
- def __init__(self, *args, **kwargs):
- if 'version' in kwargs and kwargs['version']:
- raise errors.DockerException(
- 'Can not specify version for AutoVersionClient'
- )
- kwargs['version'] = 'auto'
- super(AutoVersionClient, self).__init__(*args, **kwargs)
+ @property
+ def plugins(self):
+ """
+ An object for managing plugins on the server. See the
+ :doc:`plugins documentation <plugins>` for full details.
+ """
+ return PluginCollection(client=self)
+
+ @property
+ def secrets(self):
+ """
+ An object for managing secrets on the server. See the
+ :doc:`secrets documentation <secrets>` for full details.
+ """
+ return SecretCollection(client=self)
+
+ @property
+ def services(self):
+ """
+ An object for managing services on the server. See the
+ :doc:`services documentation <services>` for full details.
+ """
+ return ServiceCollection(client=self)
+
+ @property
+ def swarm(self):
+ """
+ An object for managing a swarm on the server. See the
+ :doc:`swarm documentation <swarm>` for full details.
+ """
+ return Swarm(client=self)
+
+ @property
+ def volumes(self):
+ """
+ An object for managing volumes on the server. See the
+ :doc:`volumes documentation <volumes>` for full details.
+ """
+ return VolumeCollection(client=self)
+
+ # Top-level methods
+ def events(self, *args, **kwargs):
+ return self.api.events(*args, **kwargs)
+ events.__doc__ = APIClient.events.__doc__
+
+ def df(self):
+ return self.api.df()
+ df.__doc__ = APIClient.df.__doc__
+
+ def info(self, *args, **kwargs):
+ return self.api.info(*args, **kwargs)
+ info.__doc__ = APIClient.info.__doc__
+
+ def login(self, *args, **kwargs):
+ return self.api.login(*args, **kwargs)
+ login.__doc__ = APIClient.login.__doc__
+
+ def ping(self, *args, **kwargs):
+ return self.api.ping(*args, **kwargs)
+ ping.__doc__ = APIClient.ping.__doc__
+
+ def version(self, *args, **kwargs):
+ return self.api.version(*args, **kwargs)
+ version.__doc__ = APIClient.version.__doc__
+
+ def __getattr__(self, name):
+ s = ["'DockerClient' object has no attribute '{}'".format(name)]
+        # If a user calls a method that only exists on APIClient, point
+        # them at the low-level API.
+ if hasattr(APIClient, name):
+ s.append("In Docker SDK for Python 2.0, this method is now on the "
+ "object APIClient. See the low-level API section of the "
+ "documentation for more details.")
+ raise AttributeError(' '.join(s))
+
+
+from_env = DockerClient.from_env
diff --git a/docker/constants.py b/docker/constants.py
index 0c9a020..91a6528 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,8 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.24'
+DEFAULT_DOCKER_API_VERSION = '1.26'
+MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
@@ -14,5 +15,5 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
-DEFAULT_USER_AGENT = "docker-py/{0}".format(version)
+DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25
diff --git a/docker/errors.py b/docker/errors.py
index 97be802..0da97f4 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -1,18 +1,47 @@
import requests
-class APIError(requests.exceptions.HTTPError):
- def __init__(self, message, response, explanation=None):
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = response.content.strip()
+ cls = APIError
+ if response.status_code == 404:
+ if explanation and ('No such image' in str(explanation) or
+ 'not found: does not exist or no pull access'
+ in str(explanation) or
+ 'repository does not exist' in str(explanation)):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise cls(e, response=response, explanation=explanation)
+
+
+class APIError(requests.exceptions.HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
-
self.explanation = explanation
- if self.explanation is None and response.content:
- self.explanation = response.content.strip()
-
def __str__(self):
message = super(APIError, self).__str__()
@@ -29,18 +58,27 @@ class APIError(requests.exceptions.HTTPError):
return message
+ @property
+ def status_code(self):
+ if self.response is not None:
+ return self.response.status_code
+
def is_client_error(self):
- return 400 <= self.response.status_code < 500
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
def is_server_error(self):
- return 500 <= self.response.status_code < 600
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
-class DockerException(Exception):
+class NotFound(APIError):
pass
-class NotFound(APIError):
+class ImageNotFound(NotFound):
pass
@@ -56,6 +94,10 @@ class InvalidConfigFile(DockerException):
pass
+class InvalidArgument(DockerException):
+ pass
+
+
class DeprecatedMethod(DockerException):
pass
@@ -73,3 +115,38 @@ class TLSParameterError(DockerException):
class NullResource(DockerException, ValueError):
pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+ msg = ("Command '{}' in image '{}' returned non-zero exit status {}: "
+ "{}").format(command, image, exit_status, stderr)
+ super(ContainerError, self).__init__(msg)
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(Exception):
+ pass
+
+
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
+ text = ["{}() ".format(name)]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
diff --git a/docker/models/__init__.py b/docker/models/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/docker/models/__init__.py
diff --git a/docker/models/containers.py b/docker/models/containers.py
new file mode 100644
index 0000000..cf01b27
--- /dev/null
+++ b/docker/models/containers.py
@@ -0,0 +1,949 @@
+import copy
+
+from ..api import APIClient
+from ..errors import (ContainerError, ImageNotFound,
+ create_unexpected_kwargs_error)
+from ..types import HostConfig
+from .images import Image
+from .resource import Collection, Model
+
+
+class Container(Model):
+
+ @property
+ def name(self):
+ """
+ The name of the container.
+ """
+ if self.attrs.get('Name') is not None:
+ return self.attrs['Name'].lstrip('/')
+
+ @property
+ def image(self):
+ """
+ The image of the container.
+ """
+ image_id = self.attrs['Image']
+ if image_id is None:
+ return None
+ return self.client.images.get(image_id.split(':')[1])
+
+ @property
+ def labels(self):
+ """
+ The labels of a container as dictionary.
+ """
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+
+ @property
+ def status(self):
+ """
+ The status of the container. For example, ``running``, or ``exited``.
+ """
+ return self.attrs['State']['Status']
+
+ def attach(self, **kwargs):
+ """
+ Attach to this container.
+
+ :py:meth:`logs` is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach(self.id, **kwargs)
+
+ def attach_socket(self, **kwargs):
+ """
+ Like :py:meth:`attach`, but returns the underlying socket-like object
+ for the HTTP request.
+
+ Args:
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach_socket(self.id, **kwargs)
+
+ def commit(self, repository=None, tag=None, **kwargs):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Engine API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ resp = self.client.api.commit(self.id, repository=repository, tag=tag,
+ **kwargs)
+ return self.client.images.get(resp['Id'])
+
+ def diff(self):
+ """
+ Inspect changes on a container's filesystem.
+
+ Returns:
+ (str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.diff(self.id)
+
+ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
+ privileged=False, user='', detach=False, stream=False,
+ socket=False, environment=None):
+ """
+ Run a command inside this container. Similar to
+ ``docker exec``.
+
+ Args:
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ stream (bool): Stream response data. Default: False
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+
+ Returns:
+ (generator or str): If ``stream=True``, a generator yielding
+ response chunks. A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.exec_create(
+ self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
+ privileged=privileged, user=user, environment=environment
+ )
+ return self.client.api.exec_start(
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ )
+
+ def export(self):
+ """
+ Export the contents of the container's filesystem as a tar archive.
+
+ Returns:
+ (str): The filesystem tar archive
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.export(self.id)
+
+ def get_archive(self, path):
+ """
+ Retrieve a file or folder from the container in the form of a tar
+ archive.
+
+ Args:
+ path (str): Path to the file or folder to retrieve
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.get_archive(self.id, path)
+
+ def kill(self, signal=None):
+ """
+ Kill or send a signal to the container.
+
+ Args:
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self.client.api.kill(self.id, signal=signal)
+
+ def logs(self, **kwargs):
+ """
+ Get logs from this container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output specified number of lines at the end of
+ logs. Either an integer of number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+
+ Returns:
+ (generator or str): Logs from the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
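+
+ Example (illustrative sketch; assumes a running container):
+
+ >>> container.logs(tail=10) # last 10 lines, returned as bytes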
+ """
+ return self.client.api.logs(self.id, **kwargs)
+
+ def pause(self):
+ """
+ Pauses all processes within this container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.pause(self.id)
+
+ def put_archive(self, path, data):
+ """
+ Insert a file or folder in this container using a tar archive as
+ source.
+
+ Args:
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.put_archive(self.id, path, data)
+
+ def remove(self, **kwargs):
+ """
+ Remove this container. Similar to the ``docker rm`` command.
+
+ Args:
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_container(self.id, **kwargs)
+
+ def rename(self, name):
+ """
+ Rename this container. Similar to the ``docker rename`` command.
+
+ Args:
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.rename(self.id, name)
+
+ def resize(self, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.resize(self.id, height, width)
+
+ def restart(self, **kwargs):
+ """
+ Restart this container. Similar to the ``docker restart`` command.
+
+ Args:
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.restart(self.id, **kwargs)
+
+ def start(self, **kwargs):
+ """
+ Start this container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.start(self.id, **kwargs)
+
+ def stats(self, **kwargs):
+ """
+ Stream statistics for this container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ decode (bool): If set to ``True``, stream will be decoded into
+ dicts on the fly. Default: ``False``.
+ stream (bool): If set to ``False``, only the current stats will
+ be returned instead of a stream. Default: ``True``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stats(self.id, **kwargs)
+
+ def stop(self, **kwargs):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stop(self.id, **kwargs)
+
+ def top(self, **kwargs):
+ """
+ Display the running processes of the container.
+
+ Args:
+ ps_args (str): Optional arguments to pass to ``ps`` (e.g. ``aux``)
+
+ Returns:
+ (str): The output of ``top``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.top(self.id, **kwargs)
+
+ def unpause(self):
+ """
+ Unpause all processes within the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.unpause(self.id)
+
+ def update(self, **kwargs):
+ """
+ Update resource configuration of the containers.
+
+ Args:
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.update_container(self.id, **kwargs)
+
+ def wait(self, **kwargs):
+ """
+ Block until the container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ timeout (int): Request timeout
+
+ Returns:
+ (int): The exit code of the container. Returns ``-1`` if the API
+ responds without a ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
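+
+ Example (illustrative; blocks until the container exits):
+
+ >>> exit_code = container.wait()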
+ """
+ return self.client.api.wait(self.id, **kwargs)
+
+
+class ContainerCollection(Collection):
+ model = Container
+
+ def run(self, image, command=None, stdout=True, stderr=False,
+ remove=False, **kwargs):
+ """
+ Run a container. By default, it will wait for the container to finish
+ and return its logs, similar to ``docker run``.
+
+ If the ``detach`` argument is ``True``, it will start the container
+ and immediately return a :py:class:`Container` object, similar to
+ ``docker run -d``.
+
+ Example:
+ Run a container and get its output:
+
+ >>> import docker
+ >>> client = docker.from_env()
+ >>> client.containers.run('alpine', 'echo hello world')
+ b'hello world\\n'
+
+ Run a container and detach:
+
+ >>> container = client.containers.run('bfirsh/reticulate-splines',
+ detach=True)
+ >>> container.logs()
+ 'Reticulating spline 1...\\nReticulating spline 2...\\n'
+
+ Args:
+ image (str): The image to run.
+ command (str or list): The command to run in the container.
+ auto_remove (bool): Enable auto-removal of the container on the
+ daemon side when the container's process exits.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_count (int): Number of usable CPUs (Windows only).
+ cpu_percent (int): Usable percentage of the available CPUs
+ (Windows only).
+ cpu_period (int): The length of a CPU period in microseconds.
+ cpu_quota (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+ (``0-3``, ``0,1``). Only effective on NUMA systems.
+ detach (bool): Run container in the background and return a
+ :py:class:`Container` object.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: ``[{"Path": "device_path", "Rate": rate}]``
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (:py:class:`list`): Expose host devices to the container,
+ as a list of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (:py:class:`list`): Set custom DNS servers.
+ dns_opt (:py:class:`list`): Additional options to be added to the
+ container's ``resolv.conf`` file.
+ dns_search (:py:class:`list`): DNS search domains.
+ domainname (str): The domain name to use for the container.
+ entrypoint (str or list): The entrypoint for the container.
+ environment (dict or list): Environment variables to set inside
+ the container, as a dictionary or a list of strings in the
+ format ``["SOMEVARIABLE=xxx"]``.
+ extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (:py:class:`list`): List of additional group names and/or
+ IDs that the container process will run as.
+ healthcheck (dict): Specify a test to perform to check that the
+ container is healthy.
+ hostname (str): Optional hostname for the container.
+ init (bool): Run an init inside the container that forwards
+ signals and reaps processes
+ init_path (str): Path to the docker-init binary
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ mac_address (str): MAC address to assign to the container.
+ mem_limit (int or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ name (str): The name for this container.
+ nano_cpus (int): CPU quota in units of 1e-9 CPUs.
+ network (str): Name of the network this container will be connected
+ to at creation time. You can connect to additional networks
+ using :py:meth:`Network.connect`. Incompatible with
+ ``network_mode``.
+ network_disabled (bool): Disable networking.
+ network_mode (str): One of:
+
+ - ``bridge`` Create a new network stack for the container
+ on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ Incompatible with ``network``.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ ports (dict): Ports to bind inside the container.
+
+ The keys of the dictionary are the ports to bind inside the
+ container, either as an integer or a string in the form
+ ``port/protocol``, where the protocol is either ``tcp`` or
+ ``udp``.
+
+ The values of the dictionary are the corresponding ports to
+ open on the host, which can be either:
+
+ - The port number, as an integer. For example,
+ ``{'2222/tcp': 3333}`` will expose port 2222 inside the
+ container as port 3333 on the host.
+ - ``None``, to assign a random host port. For example,
+ ``{'2222/tcp': None}``.
+ - A tuple of ``(address, port)`` if you want to specify the
+ host interface. For example,
+ ``{'1111/tcp': ('127.0.0.1', 1111)}``.
+ - A list of integers, if you want to bind multiple host ports
+ to a single container port. For example,
+ ``{'1111/tcp': [1234, 4567]}``.
+
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ remove (bool): Remove the container when it has finished running.
+ Default: ``False``.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure``, or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+
+ For example:
+ ``{"Name": "on-failure", "MaximumRetryCount": 5}``
+
+ security_opt (:py:class:`list`): A list of string values to
+ customize labels for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ stdin_open (bool): Keep ``STDIN`` open even if not attached.
+ stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
+ Default: ``True``.
+ stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
+ Default: ``False``.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ tty (bool): Allocate a pseudo-TTY.
+ ulimits (:py:class:`list`): Ulimits to set inside the container, as
+ a list of dicts.
+ user (str or int): Username or UID to run commands as inside the
+ container.
+ userns_mode (str): Sets the user namespace mode for the container
+ when the user namespace remapping option is enabled.
+ Supported values are: ``host``
+ volume_driver (str): The name of a volume driver/plugin.
+ volumes (dict or list): A dictionary to configure volumes mounted
+ inside the container. The key is either the host path or a
+ volume name, and the value is a dictionary with the keys:
+
+ - ``bind`` The path to mount the volume inside the container
+ - ``mode`` Either ``rw`` to mount the volume read/write, or
+ ``ro`` to mount it read-only.
+
+ For example:
+
+ .. code-block:: python
+
+ {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
+ '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+
+ volumes_from (:py:class:`list`): List of container names or IDs to
+ get volumes from.
+ working_dir (str): Path to the working directory.
+ runtime (str): Runtime to use with this container.
+
+ Returns:
+ The container logs, either ``STDOUT``, ``STDERR``, or both,
+ depending on the value of the ``stdout`` and ``stderr`` arguments.
+
+ If ``detach`` is ``True``, a :py:class:`Container` object is
+ returned instead.
+
+ Raises:
+ :py:class:`docker.errors.ContainerError`
+ If the container exits with a non-zero exit code and
+ ``detach`` is ``False``.
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ detach = kwargs.pop("detach", False)
+ if detach and remove:
+ raise RuntimeError("The options 'detach' and 'remove' cannot be "
+ "used together.")
+
+ if kwargs.get('network') and kwargs.get('network_mode'):
+ raise RuntimeError(
+ 'The options "network" and "network_mode" cannot be used '
+ 'together.'
+ )
+
+ try:
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+ except ImageNotFound:
+ self.client.images.pull(image)
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+
+ container.start()
+
+ if detach:
+ return container
+
+ exit_status = container.wait()
+ if exit_status != 0:
+ stdout = False
+ stderr = True
+ out = container.logs(stdout=stdout, stderr=stderr)
+ if remove:
+ container.remove()
+ if exit_status != 0:
+ raise ContainerError(container, exit_status, command, image, out)
+ return out
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a container without starting it. Similar to ``docker create``.
+
+ Takes the same arguments as :py:meth:`run`, except for ``stdout``,
+ ``stderr``, and ``remove``.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ kwargs['image'] = image
+ kwargs['command'] = command
+ kwargs['version'] = self.client.api._version
+ create_kwargs = _create_container_args(kwargs)
+ resp = self.client.api.create_container(**create_kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, container_id):
+ """
+ Get a container by name or ID.
+
+ Args:
+ container_id (str): Container name or ID.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the container does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.inspect_container(container_id)
+ return self.prepare_model(resp)
+
+ def list(self, all=False, before=None, filters=None, limit=-1, since=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ all (bool): Show all containers. Only running containers are shown
+ by default
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+ before (str): Show only container created before Id or Name,
+ include non-running ones
+ limit (int): Show ``limit`` last created containers, include
+ non-running ones
+ filters (dict): Filters to be processed on the container list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ (list of :py:class:`Container`)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
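+
+ Example (illustrative; the filter value is a placeholder):
+
+ >>> client.containers.list(all=True, filters={'status': 'exited'})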
+ """
+ resp = self.client.api.containers(all=all, before=before,
+ filters=filters, limit=limit,
+ since=since)
+ return [self.get(r['Id']) for r in resp]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_containers(filters=filters)
+ prune.__doc__ = APIClient.prune_containers.__doc__
+
+
+# kwargs to copy straight from run to create
+RUN_CREATE_KWARGS = [
+ 'command',
+ 'detach',
+ 'domainname',
+ 'entrypoint',
+ 'environment',
+ 'healthcheck',
+ 'hostname',
+ 'image',
+ 'labels',
+ 'mac_address',
+ 'name',
+ 'network_disabled',
+ 'stdin_open',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'volume_driver',
+ 'working_dir',
+]
+
+# kwargs to copy straight from run to host_config
+RUN_HOST_CONFIG_KWARGS = [
+ 'blkio_weight_device',
+ 'blkio_weight',
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'cpu_count',
+ 'cpu_percent',
+ 'cpu_period',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpuset_cpus',
+ 'cpuset_mems',
+ 'device_read_bps',
+ 'device_read_iops',
+ 'device_write_bps',
+ 'device_write_iops',
+ 'devices',
+ 'dns_opt',
+ 'dns_search',
+ 'dns',
+ 'extra_hosts',
+ 'group_add',
+ 'init',
+ 'init_path',
+ 'ipc_mode',
+ 'isolation',
+ 'kernel_memory',
+ 'links',
+ 'log_config',
+ 'lxc_conf',
+ 'mem_limit',
+ 'mem_reservation',
+ 'mem_swappiness',
+ 'memswap_limit',
+ 'nano_cpus',
+ 'network_mode',
+ 'oom_kill_disable',
+ 'oom_score_adj',
+ 'pid_mode',
+ 'pids_limit',
+ 'privileged',
+ 'publish_all_ports',
+ 'read_only',
+ 'restart_policy',
+ 'security_opt',
+ 'shm_size',
+ 'storage_opt',
+ 'sysctls',
+ 'tmpfs',
+ 'ulimits',
+ 'userns_mode',
+ 'version',
+ 'volumes_from',
+ 'runtime'
+]
+
+
+def _create_container_args(kwargs):
+ """
+ Convert arguments to create() to arguments to create_container().
+ """
+ # Copy over kwargs which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_CREATE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ host_config_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_HOST_CONFIG_KWARGS:
+ host_config_kwargs[key] = kwargs.pop(key)
+
+ # Process kwargs which are split over both create and host_config
+ ports = kwargs.pop('ports', {})
+ if ports:
+ host_config_kwargs['port_bindings'] = ports
+
+ volumes = kwargs.pop('volumes', {})
+ if volumes:
+ host_config_kwargs['binds'] = volumes
+
+ network = kwargs.pop('network', None)
+ if network:
+ create_kwargs['networking_config'] = {network: None}
+ host_config_kwargs['network_mode'] = network
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error('run', kwargs)
+
+ create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
+
+ # Fill in any kwargs which need processing by create_host_config first
+ port_bindings = create_kwargs['host_config'].get('PortBindings')
+ if port_bindings:
+ # sort to make consistent for tests
+ create_kwargs['ports'] = [tuple(p.split('/', 1))
+ for p in sorted(port_bindings.keys())]
+ binds = create_kwargs['host_config'].get('Binds')
+ if binds:
+ create_kwargs['volumes'] = [_host_volume_from_bind(v) for v in binds]
+ return create_kwargs
+
+
+def _host_volume_from_bind(bind):
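+ # Extract the path to pass as a container volume from a bind string
+ # such as '/host:/container:rw' (the container-side mount point);
+ # single-element binds are returned unchanged.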
+ bits = bind.split(':')
+ if len(bits) == 1:
+ return bits[0]
+ elif len(bits) == 2 and bits[1] in ('ro', 'rw'):
+ return bits[0]
+ else:
+ return bits[1]
diff --git a/docker/models/images.py b/docker/models/images.py
new file mode 100644
index 0000000..d4e24c6
--- /dev/null
+++ b/docker/models/images.py
@@ -0,0 +1,290 @@
+import re
+
+import six
+
+from ..api import APIClient
+from ..errors import BuildError
+from ..utils.json_stream import json_stream
+from .resource import Collection, Model
+
+
+class Image(Model):
+ """
+ An image on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+
+ @property
+ def labels(self):
+ """
+ The labels of an image as dictionary.
+ """
+ result = self.attrs['Config'].get('Labels')
+ return result or {}
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ if self.id.startswith('sha256:'):
+ return self.id[:17]
+ return self.id[:10]
+
+ @property
+ def tags(self):
+ """
+ The image's tags.
+ """
+ tags = self.attrs.get('RepoTags')
+ if tags is None:
+ tags = []
+ return [tag for tag in tags if tag != '<none>:<none>']
+
+ def history(self):
+ """
+ Show the history of an image.
+
+ Returns:
+ (str): The history of the image.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.history(self.id)
+
+ def save(self):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Returns:
+ (urllib3.response.HTTPResponse object): The response from the
+ daemon.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.images.get("fedora:latest")
+ >>> resp = image.save()
+ >>> f = open('/tmp/fedora-latest.tar', 'wb')
+ >>> for chunk in resp.stream():
+ ... f.write(chunk)
+ >>> f.close()
+ """
+ return self.client.api.get_image(self.id)
+
+ def tag(self, repository, tag=None, **kwargs):
+ """
+ Tag this image into a repository. Similar to the ``docker tag``
+ command.
+
+ Args:
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ (bool): ``True`` if successful
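+
+ Example (illustrative; the repository name is a placeholder):
+
+ >>> image.tag('example/myimage', tag='v1')
+ True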
+ """
+ return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+
+
+class ImageCollection(Collection):
+ model = Image
+
+ def build(self, **kwargs):
+ """
+ Build an image and return it. Similar to the ``docker build``
+ command. Either ``path`` or ``fileobj`` must be set.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is also
+ compressed, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ If you want to get the raw output of the build, use the
+ :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
+ low-level API.
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of ``False`` to preserve backward compatibility
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ shmsize (int): Size of ``/dev/shm`` in bytes. The size must be
+ greater than 0. If omitted, the system uses 64 MB.
+ labels (dict): A dictionary of labels to set on the image
+ cache_from (list): A list of images used for build cache
+ resolution
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ network_mode (str): networking mode for the run commands during
+ build
+
+ Returns:
+ (:py:class:`Image`): The built image.
+
+ Raises:
+ :py:class:`docker.errors.BuildError`
+ If there is an error during the build.
+ :py:class:`docker.errors.APIError`
+ If the server returns any other error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
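+
+ Example (illustrative sketch; path and tag are placeholders):
+
+ >>> image = client.images.build(path='.', tag='example/app')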
+ """
+ resp = self.client.api.build(**kwargs)
+ if isinstance(resp, six.string_types):
+ return self.get(resp)
+ last_event = None
+ image_id = None
+ for chunk in json_stream(resp):
+ if 'error' in chunk:
+ raise BuildError(chunk['error'])
+ if 'stream' in chunk:
+ match = re.search(
+ r'(^Successfully built |sha256:)([0-9a-f]+)$',
+ chunk['stream']
+ )
+ if match:
+ image_id = match.group(2)
+ last_event = chunk
+ if image_id:
+ return self.get(image_id)
+ raise BuildError(last_event or 'Unknown')
+
+ def get(self, name):
+ """
+ Gets an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`Image`): The image.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_image(name))
+
+ def list(self, name=None, all=False, filters=None):
+ """
+ List images on the server.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.images(name=name, all=all, filters=filters)
+ return [self.prepare_model(r) for r in resp]
+
+ def load(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
+ Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.load_image(data)
+
+ def pull(self, name, tag=None, **kwargs):
+ """
+ Pull an image of the given name and return it. Similar to the
+ ``docker pull`` command.
+
+ If you want to get the raw pull output, use the
+ :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
+ low-level API.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ insecure_registry (bool): Use an insecure registry
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.client.DockerClient.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+
+ Returns:
+ (:py:class:`Image`): The image that has been pulled.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = client.images.pull('busybox')
+ """
+ self.client.api.pull(name, tag=tag, **kwargs)
+ return self.get('{0}:{1}'.format(name, tag) if tag else name)
+
+ def push(self, repository, tag=None, **kwargs):
+ return self.client.api.push(repository, tag=tag, **kwargs)
+ push.__doc__ = APIClient.push.__doc__
+
+ def remove(self, *args, **kwargs):
+ self.client.api.remove_image(*args, **kwargs)
+ remove.__doc__ = APIClient.remove_image.__doc__
+
+ def search(self, *args, **kwargs):
+ return self.client.api.search(*args, **kwargs)
+ search.__doc__ = APIClient.search.__doc__
+
+ def prune(self, filters=None):
+ return self.client.api.prune_images(filters=filters)
+ prune.__doc__ = APIClient.prune_images.__doc__
diff --git a/docker/models/networks.py b/docker/models/networks.py
new file mode 100644
index 0000000..afb0ebe
--- /dev/null
+++ b/docker/models/networks.py
@@ -0,0 +1,192 @@
+from ..api import APIClient
+from .containers import Container
+from .resource import Model, Collection
+
+
+class Network(Model):
+ """
+ A Docker network.
+ """
+ @property
+ def name(self):
+ """
+ The name of the network.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def containers(self):
+ """
+ The containers that are connected to the network, as a list of
+ :py:class:`~docker.models.containers.Container` objects.
+ """
+ return [
+ self.client.containers.get(cid) for cid in
+ (self.attrs.get('Containers') or {}).keys()
+ ]
+
+ def connect(self, container, *args, **kwargs):
+ """
+ Connect a container to this network.
+
+ Args:
+ container (str): Container to connect to this network, as either
+ an ID, name, or :py:class:`~docker.models.containers.Container`
+ object.
+ aliases (:py:class:`list`): A list of aliases for this endpoint.
+ Names in that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (:py:class:`list`): A list of links for this endpoint.
+ Containers declared in this list will be linked to this
+ container. Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
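+
+ Example (illustrative; assumes an existing container object):
+
+ >>> network.connect(container, aliases=['web'])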
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.connect_container_to_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def disconnect(self, container, *args, **kwargs):
+ """
+ Disconnect a container from this network.
+
+ Args:
+ container (str): Container to disconnect from this network, as
+ either an ID, name, or
+ :py:class:`~docker.models.containers.Container` object.
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.disconnect_container_from_network(
+ container, self.id, *args, **kwargs
+ )
+
+ def remove(self):
+ """
+ Remove this network.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_network(self.id)
+
+
+class NetworkCollection(Collection):
+ """
+ Networks on the Docker server.
+ """
+ model = Network
+
+ def create(self, name, *args, **kwargs):
+ """
+ Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (dict): Optional custom IP scheme for the network.
+ Created with :py:class:`~docker.types.IPAMConfig`.
+ check_duplicate (bool): Request daemon to check for networks with
+ same name. Default: ``True``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+ ingress (bool): If set, create an ingress network which provides
+ the routing-mesh in swarm mode.
+
+ Returns:
+ (:py:class:`Network`): The network that was created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.networks.create("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> client.networks.create(
+ "network1",
+ driver="bridge",
+ ipam=ipam_config
+ )
+
+ """
+ resp = self.client.api.create_network(name, *args, **kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, network_id):
+ """
+ Get a network by its ID.
+
+ Args:
+ network_id (str): The ID of the network.
+
+ Returns:
+ (:py:class:`Network`) The network.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the network does not exist.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ return self.prepare_model(self.client.api.inspect_network(network_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (:py:class:`list`): List of names to filter by.
+ ids (:py:class:`list`): List of ids to filter by.
+
+ Returns:
+ (list of :py:class:`Network`) The networks on the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.networks(*args, **kwargs)
+ return [self.prepare_model(item) for item in resp]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_networks(filters=filters)
+ prune.__doc__ = APIClient.prune_networks.__doc__
diff --git a/docker/models/nodes.py b/docker/models/nodes.py
new file mode 100644
index 0000000..8dd9350
--- /dev/null
+++ b/docker/models/nodes.py
@@ -0,0 +1,107 @@
+from .resource import Model, Collection
+
+
+class Node(Model):
+ """A node in a swarm."""
+ id_attribute = 'ID'
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def update(self, node_spec):
+ """
+ Update the node's configuration.
+
+ Args:
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> node.update(node_spec)
+
+ """
+ return self.client.api.update_node(self.id, self.version, node_spec)
+
+ def remove(self, force=False):
+ """
+ Remove this node from the swarm.
+
+ Args:
+ force (bool): Force remove an active node. Default: `False`
+
+ Returns:
+ `True` if the request was successful.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the node doesn't exist in the swarm.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_node(self.id, force=force)
+
+
+class NodeCollection(Collection):
+ """Nodes on the Docker server."""
+ model = Node
+
+ def get(self, node_id):
+ """
+ Get a node.
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A :py:class:`Node` object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_node(node_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of :py:class:`Node` objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.nodes.list(filters={'role': 'manager'})
+ """
+ return [
+ self.prepare_model(n)
+ for n in self.client.api.nodes(*args, **kwargs)
+ ]
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
new file mode 100644
index 0000000..0688018
--- /dev/null
+++ b/docker/models/plugins.py
@@ -0,0 +1,200 @@
+from .. import errors
+from .resource import Collection, Model
+
+
+class Plugin(Model):
+ """
+ A plugin on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ """
+ The plugin's name.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def enabled(self):
+ """
+ Whether the plugin is enabled.
+ """
+ return self.attrs.get('Enabled')
+
+ @property
+ def settings(self):
+ """
+ A dictionary representing the plugin's configuration.
+ """
+ return self.attrs.get('Settings')
+
+ def configure(self, options):
+ """
+ Update the plugin's settings.
+
+ Args:
+ options (dict): A key-value mapping of options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.configure_plugin(self.name, options)
+ self.reload()
+
+ def disable(self):
+ """
+ Disable the plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ self.client.api.disable_plugin(self.name)
+ self.reload()
+
+ def enable(self, timeout=0):
+ """
+ Enable the plugin.
+
+ Args:
+ timeout (int): Timeout in seconds. Default: 0
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.enable_plugin(self.name, timeout)
+ self.reload()
+
+ def push(self):
+ """
+ Push the plugin to a remote registry.
+
+ Returns:
+ A dict iterator streaming the status of the upload.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.push_plugin(self.name)
+
+ def remove(self, force=False):
+ """
+ Remove the plugin from the server.
+
+ Args:
+ force (bool): Remove even if the plugin is enabled.
+ Default: False
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_plugin(self.name, force=force)
+
+ def upgrade(self, remote=None):
+ """
+ Upgrade the plugin.
+
+ Args:
+ remote (string): Remote reference to upgrade to. The
+ ``:latest`` tag is optional and is the default if omitted.
+ Default: this plugin's name.
+
+ Returns:
+ A generator streaming the decoded API logs
+ """
+ if self.enabled:
+ raise errors.DockerException(
+ 'Plugin must be disabled before upgrading.'
+ )
+
+ if remote is None:
+ remote = self.name
+ privileges = self.client.api.plugin_privileges(remote)
+ for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
+ yield d
+ self.reload()
+
+
+class PluginCollection(Collection):
+ model = Plugin
+
+ def create(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ (:py:class:`Plugin`): The newly created plugin.
+ """
+ self.client.api.create_plugin(name, plugin_data_dir, gzip)
+ return self.get(name)
+
+ def get(self, name):
+ """
+ Gets a plugin.
+
+ Args:
+ name (str): The name of the plugin.
+
+ Returns:
+ (:py:class:`Plugin`): The plugin.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the plugin does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_plugin(name))
+
+ def install(self, remote_name, local_name=None):
+ """
+ Pull and install a plugin.
+
+ Args:
+ remote_name (string): Remote reference for the plugin to
+ install. The ``:latest`` tag is optional, and is the
+ default if omitted.
+ local_name (string): Local name for the pulled plugin.
+ The ``:latest`` tag is optional, and is the default if
+ omitted. Optional.
+
+ Returns:
+ (:py:class:`Plugin`): The installed plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
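+
+ Example (illustrative; ``vieux/sshfs`` is just a sample plugin
+ reference):
+
+ >>> plugin = client.plugins.install('vieux/sshfs')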
+ """
+ privileges = self.client.api.plugin_privileges(remote_name)
+ it = self.client.api.pull_plugin(remote_name, privileges, local_name)
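+ # The streamed pull output must be consumed for the install to finish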
+ for data in it:
+ pass
+ return self.get(local_name or remote_name)
+
+ def list(self):
+ """
+ List plugins installed on the server.
+
+ Returns:
+ (list of :py:class:`Plugin`): The plugins.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.plugins()
+ return [self.prepare_model(r) for r in resp]
diff --git a/docker/models/resource.py b/docker/models/resource.py
new file mode 100644
index 0000000..ed3900a
--- /dev/null
+++ b/docker/models/resource.py
@@ -0,0 +1,93 @@
+
+class Model(object):
+ """
+ A base class for representing a single object on the server.
+ """
+ id_attribute = 'Id'
+
+ def __init__(self, attrs=None, client=None, collection=None):
+ #: A client pointing at the server that this object is on.
+ self.client = client
+
+ #: The collection that this model is part of.
+ self.collection = collection
+
+ #: The raw representation of this object from the API
+ self.attrs = attrs
+ if self.attrs is None:
+ self.attrs = {}
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.id == other.id
+
+ def __hash__(self):
+ return hash("%s:%s" % (self.__class__.__name__, self.id))
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs.get(self.id_attribute)
+
+ @property
+ def short_id(self):
+ """
+ The ID of the object, truncated to 10 characters.
+ """
+ return self.id[:10]
+
+ def reload(self):
+ """
+ Load this object from the server again and update ``attrs`` with the
+ new data.
+ """
+ new_model = self.collection.get(self.id)
+ self.attrs = new_model.attrs
+
+
+class Collection(object):
+ """
+ A base class for representing all objects of a particular type on the
+ server.
+ """
+
+ #: The type of object this collection represents, set by subclasses
+ model = None
+
+ def __init__(self, client=None):
+ #: The client pointing at the server that this collection of objects
+ #: is on.
+ self.client = client
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError(
+ "'{}' object is not callable. You might be trying to use the old "
+ "(pre-2.0) API - use docker.APIClient if so."
+ .format(self.__class__.__name__))
+
+ def list(self):
+ raise NotImplementedError
+
+ def get(self, key):
+ raise NotImplementedError
+
+ def create(self, attrs=None):
+ raise NotImplementedError
+
+ def prepare_model(self, attrs):
+ """
+ Create a model from a set of attributes.
+ """
+ if isinstance(attrs, Model):
+ attrs.client = self.client
+ attrs.collection = self
+ return attrs
+ elif isinstance(attrs, dict):
+ return self.model(attrs=attrs, client=self.client, collection=self)
+ else:
+ raise Exception("Can't create %s from %s" %
+ (self.model.__name__, attrs))
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
new file mode 100644
index 0000000..ca11ede
--- /dev/null
+++ b/docker/models/secrets.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Secret(Model):
+ """A secret."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this secret.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the secret could not be removed.
+ """
+ return self.client.api.remove_secret(self.id)
+
+
+class SecretCollection(Collection):
+ """Secrets on the Docker server."""
+ model = Secret
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_secret(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_secret.__doc__
+
+ def get(self, secret_id):
+ """
+ Get a secret.
+
+ Args:
+ secret_id (str): Secret ID.
+
+ Returns:
+ (:py:class:`Secret`): The secret.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the secret does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_secret(secret_id))
+
+ def list(self, **kwargs):
+ """
+ List secrets. Similar to the ``docker secret ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Secret`): The secrets.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
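+
+ Example (illustrative; the filter value is a placeholder):
+
+ >>> client.secrets.list(filters={'name': 'my_secret'})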
+ """
+ resp = self.client.api.secrets(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/services.py b/docker/models/services.py
new file mode 100644
index 0000000..e1e2ea6
--- /dev/null
+++ b/docker/models/services.py
@@ -0,0 +1,273 @@
+import copy
+from docker.errors import create_unexpected_kwargs_error
+from docker.types import TaskTemplate, ContainerSpec
+from .resource import Model, Collection
+
+
+class Service(Model):
+ """A service."""
+ id_attribute = 'ID'
+
+ @property
+ def name(self):
+ """The service's name."""
+ return self.attrs['Spec']['Name']
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server's, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def remove(self):
+ """
+ Stop and remove the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_service(self.id)
+
+ def tasks(self, filters=None):
+ """
+ List the tasks in this service.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``node``,
+ ``label``, and ``desired-state``.
+
+ Returns:
+ (:py:class:`list`): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if filters is None:
+ filters = {}
+ filters['service'] = self.id
+ return self.client.api.tasks(filters=filters)
+
+ def update(self, **kwargs):
+ """
+ Update a service's configuration. Similar to the ``docker service
+ update`` command.
+
+ Takes the same parameters as :py:meth:`~ServiceCollection.create`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ # Image is required, so if it hasn't been set, use current image
+ if 'image' not in kwargs:
+ spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ kwargs['image'] = spec['Image']
+
+ create_kwargs = _get_create_service_kwargs('update', kwargs)
+
+ return self.client.api.update_service(
+ self.id,
+ self.version,
+ **create_kwargs
+ )
+
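
# Editor's sketch (not part of the commit): the optimistic-locking flow that
# Service.update() above relies on; the service name is illustrative.
import docker

client = docker.from_env()
service = client.services.get('web')
try:
    service.update(env=['DEBUG=1'])     # image omitted -> current image reused
except docker.errors.APIError:
    service.reload()                    # refresh attrs, including Version.Index
    service.update(env=['DEBUG=1'])
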
+ def logs(self, **kwargs):
+ """
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ (generator): Logs for the service.
+ """
+ is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
+ 'TTY', False
+ )
+ return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
+
+
+class ServiceCollection(Collection):
+ """Services on the Docker server."""
+ model = Service
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a service. Similar to the ``docker service create`` command.
+
+ Args:
+ image (str): The image name to use for the containers.
+ command (list of str or str): Command to run.
+ args (list of str): Arguments to the command.
+ constraints (list of str): Placement constraints.
+ container_labels (dict): Labels to apply to the container.
+ endpoint_spec (EndpointSpec): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+ env (list of str): Environment variables, in the form
+ ``KEY=val``.
+ hostname (string): Hostname to set on the container.
+ labels (dict): Labels to apply to the service.
+ log_driver (str): Log driver to use for containers.
+ log_driver_options (dict): Log driver options.
+ mode (ServiceMode): Scheduling mode for the service.
+ Default: ``None``.
+ mounts (list of str): Mounts for the containers, in the form
+ ``source:target:options``, where options is either
+ ``ro`` or ``rw``.
+ name (str): Name to give to the service.
+ networks (list of str): List of network names or IDs to attach
+ the service to. Default: ``None``.
+ resources (Resources): Resource limits and reservations.
+ restart_policy (RestartPolicy): Restart policy for containers.
+ secrets (list of :py:class:`docker.types.SecretReference`): List
+ of secrets accessible to containers for this service.
+ stop_grace_period (int): Amount of time to wait for
+ containers to terminate before forcefully killing them.
+ update_config (UpdateConfig): Specification for the update strategy
+ of the service. Default: ``None``
+ user (str): User to run commands as.
+ workdir (str): Working directory for commands to run.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+
+ Returns:
+ (:py:class:`Service`): The created service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ kwargs['image'] = image
+ kwargs['command'] = command
+ create_kwargs = _get_create_service_kwargs('create', kwargs)
+ service_id = self.client.api.create_service(**create_kwargs)
+ return self.get(service_id)
+
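
# Editor's sketch (not part of the commit): a create() call showing how the
# flat kwargs documented above map onto the nested swarm API objects; the
# values are illustrative.
import docker

client = docker.from_env()
service = client.services.create(
    image='nginx:alpine',
    name='web',
    env=['WORKERS=4'],
    constraints=['node.role == worker'],    # -> TaskTemplate placement
    container_labels={'tier': 'frontend'},  # -> ContainerSpec labels
)
print(service.name, service.version)
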
+ def get(self, service_id):
+ """
+ Get a service.
+
+ Args:
+ service_id (str): The ID of the service.
+
+ Returns:
+ (:py:class:`Service`): The service.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the service does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_service(service_id))
+
+ def list(self, **kwargs):
+ """
+ List services.
+
+ Args:
+ filters (dict): Filters to process on the services list. Valid
+ filters: ``id`` and ``name``. Default: ``None``.
+
+ Returns:
+ (list of :py:class:`Service`): The services.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return [
+ self.prepare_model(s)
+ for s in self.client.api.services(**kwargs)
+ ]
+
+
+# kwargs to copy straight over to ContainerSpec
+CONTAINER_SPEC_KWARGS = [
+ 'image',
+ 'command',
+ 'args',
+ 'env',
+ 'hostname',
+ 'workdir',
+ 'user',
+ 'labels',
+ 'mounts',
+ 'stop_grace_period',
+ 'secrets',
+ 'tty'
+]
+
+# kwargs to copy straight over to TaskTemplate
+TASK_TEMPLATE_KWARGS = [
+ 'resources',
+ 'restart_policy',
+]
+
+# kwargs to copy straight over to create_service
+CREATE_SERVICE_KWARGS = [
+ 'name',
+ 'labels',
+ 'mode',
+ 'update_config',
+ 'networks',
+ 'endpoint_spec',
+]
+
+
+def _get_create_service_kwargs(func_name, kwargs):
+ # Copy over things which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CREATE_SERVICE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ container_spec_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CONTAINER_SPEC_KWARGS:
+ container_spec_kwargs[key] = kwargs.pop(key)
+ task_template_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in TASK_TEMPLATE_KWARGS:
+ task_template_kwargs[key] = kwargs.pop(key)
+
+ if 'container_labels' in kwargs:
+ container_spec_kwargs['labels'] = kwargs.pop('container_labels')
+
+ if 'constraints' in kwargs:
+ task_template_kwargs['placement'] = {
+ 'Constraints': kwargs.pop('constraints')
+ }
+
+ if 'log_driver' in kwargs:
+ task_template_kwargs['log_driver'] = {
+ 'Name': kwargs.pop('log_driver'),
+ 'Options': kwargs.pop('log_driver_options', {})
+ }
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error(func_name, kwargs)
+
+ container_spec = ContainerSpec(**container_spec_kwargs)
+ task_template_kwargs['container_spec'] = container_spec
+ create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
+ return create_kwargs
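
# Editor's demonstration (not part of the commit) of the partitioning done by
# _get_create_service_kwargs() above; the input dict is consumed as keys are
# popped, and any leftover key raises via create_unexpected_kwargs_error().
from docker.models.services import _get_create_service_kwargs

create_kwargs = _get_create_service_kwargs('create', {
    'image': 'nginx:alpine',
    'name': 'web',
    'constraints': ['node.role == worker'],
    'log_driver': 'json-file',
    'log_driver_options': {'max-size': '10m'},
})
# create_kwargs now holds 'name' plus a 'task_template' whose ContainerSpec
# carries the image, whose 'Placement' is {'Constraints': [...]}, and whose
# 'LogDriver' is {'Name': 'json-file', 'Options': {'max-size': '10m'}}.
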
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
new file mode 100644
index 0000000..d3d07ee
--- /dev/null
+++ b/docker/models/swarm.py
@@ -0,0 +1,147 @@
+from docker.api import APIClient
+from docker.errors import APIError
+from docker.types import SwarmSpec
+from .resource import Model
+
+
+class Swarm(Model):
+ """
+ The server's Swarm state. This is a singleton that must be reloaded to get
+ the current state of the Swarm.
+ """
+ def __init__(self, *args, **kwargs):
+ super(Swarm, self).__init__(*args, **kwargs)
+ if self.client:
+ try:
+ self.reload()
+ except APIError as e:
+ # FIXME: https://github.com/docker/docker/issues/29192
+ if e.response.status_code not in (406, 503):
+ raise
+
+ @property
+ def version(self):
+ """
+ The version number of the swarm. If this is not the same as the
+ server's, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, **kwargs):
+ """
+ Initialize a new swarm on this Engine.
+
+ Args:
+ advertise_addr (str): Externally reachable address advertised to
+ other nodes. This can either be an address/port combination in
+ the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used.
+
+ If not specified, it will be automatically detected when
+ possible.
+ listen_addr (str): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: ``0.0.0.0:2377``
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ task_history_retention_limit (int): Maximum number of task
+ history entries stored.
+ snapshot_interval (int): Number of log entries between snapshots.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Number of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Number of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for node certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.swarm.init(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, snapshot_interval=5000,
+ log_entries_for_slow_followers=1200
+ )
+
+ """
+ init_kwargs = {
+ 'advertise_addr': advertise_addr,
+ 'listen_addr': listen_addr,
+ 'force_new_cluster': force_new_cluster
+ }
+ init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
+ result = self.client.api.init_swarm(**init_kwargs)
+ self.reload()
+ return result
+
+ def join(self, *args, **kwargs):
+ return self.client.api.join_swarm(*args, **kwargs)
+ join.__doc__ = APIClient.join_swarm.__doc__
+
+ def leave(self, *args, **kwargs):
+ return self.client.api.leave_swarm(*args, **kwargs)
+ leave.__doc__ = APIClient.leave_swarm.__doc__
+
+ def reload(self):
+ """
+ Inspect the swarm on the server and store the response in
+ :py:attr:`attrs`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.attrs = self.client.api.inspect_swarm()
+
+ def update(self, rotate_worker_token=False, rotate_manager_token=False,
+ **kwargs):
+ """
+ Update the swarm's configuration.
+
+ It takes the same arguments as :py:meth:`init`, except
+ ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
+ addition, it takes these arguments:
+
+ Args:
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ # node_cert_expiry must always be present in the spec; default to
+ # 90 days, expressed in nanoseconds
+ if kwargs.get('node_cert_expiry') is None:
+ kwargs['node_cert_expiry'] = 7776000000000000
+
+ return self.client.api.update_swarm(
+ version=self.version,
+ swarm_spec=SwarmSpec(**kwargs),
+ rotate_worker_token=rotate_worker_token,
+ rotate_manager_token=rotate_manager_token
+ )
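
# Editor's sketch (not part of the commit): update() above rebuilds the whole
# SwarmSpec from its kwargs, so unspecified settings are not carried over from
# the current configuration; reload() first to inspect the live state.
import docker

client = docker.from_env()
client.swarm.reload()
client.swarm.update(
    task_history_retention_limit=10,
    rotate_worker_token=True,           # issue a new worker join token
)
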
diff --git a/docker/models/volumes.py b/docker/models/volumes.py
new file mode 100644
index 0000000..3c2e837
--- /dev/null
+++ b/docker/models/volumes.py
@@ -0,0 +1,99 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Volume(Model):
+ """A volume."""
+ id_attribute = 'Name'
+
+ @property
+ def name(self):
+ """The name of the volume."""
+ return self.attrs['Name']
+
+ def remove(self, force=False):
+ """
+ Remove this volume.
+
+ Args:
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the volume could not be removed.
+ """
+ return self.client.api.remove_volume(self.id, force=force)
+
+
+class VolumeCollection(Collection):
+ """Volumes on the Docker server."""
+ model = Volume
+
+ def create(self, name=None, **kwargs):
+ """
+ Create a volume.
+
+ Args:
+ name (str): Name of the volume. If not specified, the engine
+ generates a name.
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (:py:class:`Volume`): The volume created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = client.volumes.create(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+
+ """
+ obj = self.client.api.create_volume(name, **kwargs)
+ return self.prepare_model(obj)
+
+ def get(self, volume_id):
+ """
+ Get a volume.
+
+ Args:
+ volume_id (str): Volume name.
+
+ Returns:
+ (:py:class:`Volume`): The volume.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the volume does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_volume(volume_id))
+
+ def list(self, **kwargs):
+ """
+ List volumes. Similar to the ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Volume`): The volumes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.volumes(**kwargs)
+ if not resp.get('Volumes'):
+ return []
+ return [self.prepare_model(obj) for obj in resp['Volumes']]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_volumes(filters=filters)
+ prune.__doc__ = APIClient.prune_volumes.__doc__
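
# Editor's sketch (not part of the commit): volumes are keyed by Name (see
# id_attribute above), so get() takes the name and Model.__eq__ compares it.
import docker

client = docker.from_env()
volume = client.volumes.create(name='appdata', driver='local')
assert volume == client.volumes.get('appdata')
for v in client.volumes.list():
    print(v.name)
volume.remove()
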
diff --git a/docker/ssladapter/__init__.py b/docker/ssladapter/__init__.py
deleted file mode 100644
index 1a5e1bb..0000000
--- a/docker/ssladapter/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .ssladapter import SSLAdapter # flake8: noqa
diff --git a/docker/tls.py b/docker/tls.py
index 7abfa60..6488bbc 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -2,10 +2,24 @@ import os
import ssl
from . import errors
-from .ssladapter import ssladapter
+from .transport import SSLAdapter
class TLSConfig(object):
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be ``False`` or a path to a CA cert
+ file.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
cert = None
ca_cert = None
verify = None
@@ -42,7 +56,7 @@ class TLSConfig(object):
)
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
- not os.path.isfile(tls_key)):
+ not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_config param'
@@ -58,6 +72,9 @@ class TLSConfig(object):
)
def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert:
@@ -68,7 +85,7 @@ class TLSConfig(object):
if self.cert:
client.cert = self.cert
- client.mount('https://', ssladapter.SSLAdapter(
+ client.mount('https://', SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
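
# Editor's sketch (not part of the commit): constructing a TLSConfig as
# documented above and handing it to a client; the paths and host are
# placeholders.
import docker

tls_config = docker.tls.TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    ca_cert='/certs/ca.pem',
    verify=True,
)
client = docker.DockerClient(base_url='https://dockerhost:2376', tls=tls_config)
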
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
index 46dfdf8..abbee18 100644
--- a/docker/transport/__init__.py
+++ b/docker/transport/__init__.py
@@ -1,7 +1,8 @@
# flake8: noqa
from .unixconn import UnixAdapter
+from .ssladapter import SSLAdapter
try:
from .npipeconn import NpipeAdapter
from .npipesocket import NpipeSocket
except ImportError:
- pass
\ No newline at end of file
+ pass
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index 017738e..ab9b904 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -69,12 +69,17 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class NpipeAdapter(requests.adapters.HTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
+ 'pools',
+ 'timeout']
+
def __init__(self, base_url, timeout=60,
- num_pools=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.pools = RecentlyUsedContainer(
- num_pools, dispose_func=lambda p: p.close()
+ pool_connections, dispose_func=lambda p: p.close()
)
super(NpipeAdapter, self).__init__()
@@ -96,7 +101,7 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
- # See also: https://github.com/docker/docker-py/issues/811
+ # See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
def close(self):
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index 3b1b644..c04b39d 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -29,6 +29,7 @@ class NpipeSocket(object):
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
+
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
diff --git a/docker/ssladapter/ssladapter.py b/docker/transport/ssladapter.py
index e17dfad..8fafec3 100644
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -24,6 +24,11 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5:
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
+
+ __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
+ 'assert_hostname',
+ 'ssl_version']
+
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index b7905a0..3565cfb 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -50,15 +50,20 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
class UnixAdapter(requests.adapters.HTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
+ 'socket_path',
+ 'timeout']
+
def __init__(self, socket_url, timeout=60,
- num_pools=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.pools = RecentlyUsedContainer(
- num_pools, dispose_func=lambda p: p.close()
+ pool_connections, dispose_func=lambda p: p.close()
)
super(UnixAdapter, self).__init__()
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 3609581..edc919d 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,7 +1,9 @@
# flake8: noqa
-from .containers import LogConfig, Ulimit
+from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .healthcheck import Healthcheck
+from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
- ContainerSpec, DriverConfig, Mount, Resources, RestartPolicy, TaskTemplate,
- UpdateConfig
+ ContainerSpec, DriverConfig, EndpointSpec, Mount, Placement, Resources,
+ RestartPolicy, SecretReference, ServiceMode, TaskTemplate, UpdateConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 40a44ca..030e292 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,6 +1,14 @@
import six
+import warnings
+from .. import errors
+from ..utils.utils import (
+ convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
+ format_environment, normalize_links, parse_bytes, parse_devices,
+ split_command, version_gte, version_lt,
+)
from .base import DictType
+from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
@@ -90,3 +98,579 @@ class Ulimit(DictType):
@hard.setter
def hard(self, value):
self['Hard'] = value
+
+
+class HostConfig(dict):
+ def __init__(self, version, binds=None, port_bindings=None,
+ lxc_conf=None, publish_all_ports=False, links=None,
+ privileged=False, dns=None, dns_search=None,
+ volumes_from=None, network_mode=None, restart_policy=None,
+ cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_reservation=None,
+ kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
+ group_add=None, cpu_quota=None, cpu_period=None,
+ blkio_weight=None, blkio_weight_device=None,
+ device_read_bps=None, device_write_bps=None,
+ device_read_iops=None, device_write_iops=None,
+ oom_kill_disable=False, shm_size=None, sysctls=None,
+ tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
+ cpuset_cpus=None, userns_mode=None, pids_limit=None,
+ isolation=None, auto_remove=False, storage_opt=None,
+ init=None, init_path=None, volume_driver=None,
+ cpu_count=None, cpu_percent=None, nano_cpus=None,
+ cpuset_mems=None, runtime=None):
+
+ if mem_limit is not None:
+ self['Memory'] = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ self['MemorySwap'] = parse_bytes(memswap_limit)
+
+ if mem_reservation:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('mem_reservation', '1.21')
+
+ self['MemoryReservation'] = parse_bytes(mem_reservation)
+
+ if kernel_memory:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('kernel_memory', '1.21')
+
+ self['KernelMemory'] = parse_bytes(kernel_memory)
+
+ if mem_swappiness is not None:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('mem_swappiness', '1.20')
+ if not isinstance(mem_swappiness, int):
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
+ )
+
+ self['MemorySwappiness'] = mem_swappiness
+
+ if shm_size is not None:
+ if isinstance(shm_size, six.string_types):
+ shm_size = parse_bytes(shm_size)
+
+ self['ShmSize'] = shm_size
+
+ if pid_mode:
+ if version_lt(version, '1.24') and pid_mode != 'host':
+ raise host_config_value_error('pid_mode', pid_mode)
+ self['PidMode'] = pid_mode
+
+ if ipc_mode:
+ self['IpcMode'] = ipc_mode
+
+ if privileged:
+ self['Privileged'] = privileged
+
+ if oom_kill_disable:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('oom_kill_disable', '1.20')
+
+ self['OomKillDisable'] = oom_kill_disable
+
+ if oom_score_adj:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('oom_score_adj', '1.22')
+ if not isinstance(oom_score_adj, int):
+ raise host_config_type_error(
+ 'oom_score_adj', oom_score_adj, 'int'
+ )
+ self['OomScoreAdj'] = oom_score_adj
+
+ if publish_all_ports:
+ self['PublishAllPorts'] = publish_all_ports
+
+ if read_only is not None:
+ self['ReadonlyRootfs'] = read_only
+
+ if dns_search:
+ self['DnsSearch'] = dns_search
+
+ if network_mode:
+ self['NetworkMode'] = network_mode
+ elif network_mode is None and version_gte(version, '1.20'):
+ self['NetworkMode'] = 'default'
+
+ if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
+ self['RestartPolicy'] = restart_policy
+
+ if cap_add:
+ self['CapAdd'] = cap_add
+
+ if cap_drop:
+ self['CapDrop'] = cap_drop
+
+ if devices:
+ self['Devices'] = parse_devices(devices)
+
+ if group_add:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('group_add', '1.20')
+
+ self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+
+ if dns is not None:
+ self['Dns'] = dns
+
+ if dns_opt is not None:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('dns_opt', '1.21')
+
+ self['DnsOptions'] = dns_opt
+
+ if security_opt is not None:
+ if not isinstance(security_opt, list):
+ raise host_config_type_error(
+ 'security_opt', security_opt, 'list'
+ )
+
+ self['SecurityOpt'] = security_opt
+
+ if sysctls:
+ if not isinstance(sysctls, dict):
+ raise host_config_type_error('sysctls', sysctls, 'dict')
+ self['Sysctls'] = {}
+ for k, v in six.iteritems(sysctls):
+ self['Sysctls'][k] = six.text_type(v)
+
+ if volumes_from is not None:
+ if isinstance(volumes_from, six.string_types):
+ volumes_from = volumes_from.split(',')
+
+ self['VolumesFrom'] = volumes_from
+
+ if binds is not None:
+ self['Binds'] = convert_volume_binds(binds)
+
+ if port_bindings is not None:
+ self['PortBindings'] = convert_port_bindings(port_bindings)
+
+ if extra_hosts is not None:
+ if isinstance(extra_hosts, dict):
+ extra_hosts = [
+ '{0}:{1}'.format(k, v)
+ for k, v in sorted(six.iteritems(extra_hosts))
+ ]
+
+ self['ExtraHosts'] = extra_hosts
+
+ if links is not None:
+ self['Links'] = normalize_links(links)
+
+ if isinstance(lxc_conf, dict):
+ formatted = []
+ for k, v in six.iteritems(lxc_conf):
+ formatted.append({'Key': k, 'Value': str(v)})
+ lxc_conf = formatted
+
+ if lxc_conf is not None:
+ self['LxcConf'] = lxc_conf
+
+ if cgroup_parent is not None:
+ self['CgroupParent'] = cgroup_parent
+
+ if ulimits is not None:
+ if not isinstance(ulimits, list):
+ raise host_config_type_error('ulimits', ulimits, 'list')
+ self['Ulimits'] = []
+ for l in ulimits:
+ if not isinstance(l, Ulimit):
+ l = Ulimit(**l)
+ self['Ulimits'].append(l)
+
+ if log_config is not None:
+ if not isinstance(log_config, LogConfig):
+ if not isinstance(log_config, dict):
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
+ )
+ log_config = LogConfig(**log_config)
+
+ self['LogConfig'] = log_config
+
+ if cpu_quota:
+ if not isinstance(cpu_quota, int):
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
+ if version_lt(version, '1.19'):
+ raise host_config_version_error('cpu_quota', '1.19')
+
+ self['CpuQuota'] = cpu_quota
+
+ if cpu_period:
+ if not isinstance(cpu_period, int):
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
+ if version_lt(version, '1.19'):
+ raise host_config_version_error('cpu_period', '1.19')
+
+ self['CpuPeriod'] = cpu_period
+
+ if cpu_shares:
+ if version_lt(version, '1.18'):
+ raise host_config_version_error('cpu_shares', '1.18')
+
+ if not isinstance(cpu_shares, int):
+ raise host_config_type_error('cpu_shares', cpu_shares, 'int')
+
+ self['CpuShares'] = cpu_shares
+
+ if cpuset_cpus:
+ if version_lt(version, '1.18'):
+ raise host_config_version_error('cpuset_cpus', '1.18')
+
+ self['CpusetCpus'] = cpuset_cpus
+
+ if cpuset_mems:
+ if version_lt(version, '1.19'):
+ raise host_config_version_error('cpuset_mems', '1.19')
+
+ if not isinstance(cpuset_mems, str):
+ raise host_config_type_error(
+ 'cpuset_mems', cpuset_mems, 'str'
+ )
+ self['CpusetMems'] = cpuset_mems
+
+ if blkio_weight:
+ if not isinstance(blkio_weight, int):
+ raise host_config_type_error(
+ 'blkio_weight', blkio_weight, 'int'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight', '1.22')
+ self["BlkioWeight"] = blkio_weight
+
+ if blkio_weight_device:
+ if not isinstance(blkio_weight_device, list):
+ raise host_config_type_error(
+ 'blkio_weight_device', blkio_weight_device, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight_device', '1.22')
+ self["BlkioWeightDevice"] = blkio_weight_device
+
+ if device_read_bps:
+ if not isinstance(device_read_bps, list):
+ raise host_config_type_error(
+ 'device_read_bps', device_read_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_bps', '1.22')
+ self["BlkioDeviceReadBps"] = device_read_bps
+
+ if device_write_bps:
+ if not isinstance(device_write_bps, list):
+ raise host_config_type_error(
+ 'device_write_bps', device_write_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_bps', '1.22')
+ self["BlkioDeviceWriteBps"] = device_write_bps
+
+ if device_read_iops:
+ if not isinstance(device_read_iops, list):
+ raise host_config_type_error(
+ 'device_read_iops', device_read_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_iops', '1.22')
+ self["BlkioDeviceReadIOps"] = device_read_iops
+
+ if device_write_iops:
+ if not isinstance(device_write_iops, list):
+ raise host_config_type_error(
+ 'device_write_iops', device_write_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_iops', '1.22')
+ self["BlkioDeviceWriteIOps"] = device_write_iops
+
+ if tmpfs:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('tmpfs', '1.22')
+ self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
+
+ if userns_mode:
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('userns_mode', '1.23')
+
+ if userns_mode != "host":
+ raise host_config_value_error("userns_mode", userns_mode)
+ self['UsernsMode'] = userns_mode
+
+ if pids_limit:
+ if not isinstance(pids_limit, int):
+ raise host_config_type_error('pids_limit', pids_limit, 'int')
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('pids_limit', '1.23')
+ self["PidsLimit"] = pids_limit
+
+ if isolation:
+ if not isinstance(isolation, six.string_types):
+ raise host_config_type_error('isolation', isolation, 'string')
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('isolation', '1.24')
+ self['Isolation'] = isolation
+
+ if auto_remove:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('auto_remove', '1.25')
+ self['AutoRemove'] = auto_remove
+
+ if storage_opt is not None:
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('storage_opt', '1.24')
+ self['StorageOpt'] = storage_opt
+
+ if init is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init', '1.25')
+ self['Init'] = init
+
+ if init_path is not None:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('init_path', '1.25')
+
+ if version_gte(version, '1.29'):
+ # https://github.com/moby/moby/pull/32470
+ raise host_config_version_error('init_path', '1.29', False)
+ self['InitPath'] = init_path
+
+ if volume_driver is not None:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('volume_driver', '1.21')
+ self['VolumeDriver'] = volume_driver
+
+ if cpu_count:
+ if not isinstance(cpu_count, int):
+ raise host_config_type_error('cpu_count', cpu_count, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_count', '1.25')
+
+ self['CpuCount'] = cpu_count
+
+ if cpu_percent:
+ if not isinstance(cpu_percent, int):
+ raise host_config_type_error('cpu_percent', cpu_percent, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('cpu_percent', '1.25')
+
+ self['CpuPercent'] = cpu_percent
+
+ if nano_cpus:
+ if not isinstance(nano_cpus, six.integer_types):
+ raise host_config_type_error('nano_cpus', nano_cpus, 'int')
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('nano_cpus', '1.25')
+
+ self['NanoCpus'] = nano_cpus
+
+ if runtime:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('runtime', '1.25')
+ self['Runtime'] = runtime
+
+
+def host_config_type_error(param, param_value, expected):
+ error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
+ return TypeError(error_msg.format(param, expected, type(param_value)))
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ error_msg = '{0} param is not supported in API versions {1} {2}'
+ return errors.InvalidVersion(error_msg.format(param, operator, version))
+
+
+def host_config_value_error(param, param_value):
+ error_msg = 'Invalid value for {0} param: {1}'
+ return ValueError(error_msg.format(param, param_value))
+
+
+class ContainerConfig(dict):
+ def __init__(
+ self, version, image, command, hostname=None, user=None, detach=False,
+ stdin_open=False, tty=False, mem_limit=None, ports=None, dns=None,
+ environment=None, volumes=None, volumes_from=None,
+ network_disabled=False, entrypoint=None, cpu_shares=None,
+ working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
+ host_config=None, mac_address=None, labels=None, volume_driver=None,
+ stop_signal=None, networking_config=None, healthcheck=None,
+ stop_timeout=None, runtime=None
+ ):
+ if version_gte(version, '1.10'):
+ message = ('{0!r} parameter has no effect on create_container().'
+ ' It has been moved to host_config')
+ if dns is not None:
+ raise errors.InvalidVersion(message.format('dns'))
+ if volumes_from is not None:
+ raise errors.InvalidVersion(message.format('volumes_from'))
+
+ if version_lt(version, '1.18'):
+ if labels is not None:
+ raise errors.InvalidVersion(
+ 'labels were only introduced in API version 1.18'
+ )
+ else:
+ if cpuset is not None or cpu_shares is not None:
+ warnings.warn(
+ 'The cpuset_cpus and cpu_shares options have been moved to'
+ ' host_config in API version 1.18, and will be removed',
+ DeprecationWarning
+ )
+
+ if version_lt(version, '1.19'):
+ if volume_driver is not None:
+ raise errors.InvalidVersion(
+ 'Volume drivers were only introduced in API version 1.19'
+ )
+ mem_limit = mem_limit if mem_limit is not None else 0
+ memswap_limit = memswap_limit if memswap_limit is not None else 0
+ else:
+ if mem_limit is not None:
+ raise errors.InvalidVersion(
+ 'mem_limit has been moved to host_config in API version'
+ ' 1.19'
+ )
+
+ if memswap_limit is not None:
+ raise errors.InvalidVersion(
+ 'memswap_limit has been moved to host_config in API '
+ 'version 1.19'
+ )
+
+ if version_lt(version, '1.21'):
+ if stop_signal is not None:
+ raise errors.InvalidVersion(
+ 'stop_signal was only introduced in API version 1.21'
+ )
+ else:
+ if volume_driver is not None:
+ warnings.warn(
+ 'The volume_driver option has been moved to'
+ ' host_config in API version 1.21, and will be removed',
+ DeprecationWarning
+ )
+
+ if stop_timeout is not None and version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'stop_timeout was only introduced in API version 1.25'
+ )
+
+ if healthcheck is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'Health options were only introduced in API version 1.24'
+ )
+
+ if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
+ raise errors.InvalidVersion(
+ 'healthcheck start period was introduced in API '
+ 'version 1.29'
+ )
+
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+
+ if isinstance(entrypoint, six.string_types):
+ entrypoint = split_command(entrypoint)
+
+ if isinstance(environment, dict):
+ environment = format_environment(environment)
+
+ if isinstance(labels, list):
+ labels = dict((lbl, six.text_type('')) for lbl in labels)
+
+ if mem_limit is not None:
+ mem_limit = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ memswap_limit = parse_bytes(memswap_limit)
+
+ if isinstance(ports, list):
+ exposed_ports = {}
+ for port_definition in ports:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ ports = exposed_ports
+
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
+ if isinstance(volumes, list):
+ volumes_dict = {}
+ for vol in volumes:
+ volumes_dict[vol] = {}
+ volumes = volumes_dict
+
+ if volumes_from:
+ if not isinstance(volumes_from, six.string_types):
+ volumes_from = ','.join(volumes_from)
+ else:
+ # Force None, an empty list or dict causes client.start to fail
+ volumes_from = None
+
+ if healthcheck and isinstance(healthcheck, dict):
+ healthcheck = Healthcheck(**healthcheck)
+
+ attach_stdin = False
+ attach_stdout = False
+ attach_stderr = False
+ stdin_once = False
+
+ if not detach:
+ attach_stdout = True
+ attach_stderr = True
+
+ if stdin_open:
+ attach_stdin = True
+ stdin_once = True
+
+ self.update({
+ 'Hostname': hostname,
+ 'Domainname': domainname,
+ 'ExposedPorts': ports,
+ 'User': six.text_type(user) if user else None,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'Memory': mem_limit,
+ 'AttachStdin': attach_stdin,
+ 'AttachStdout': attach_stdout,
+ 'AttachStderr': attach_stderr,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Dns': dns,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'VolumesFrom': volumes_from,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'CpuShares': cpu_shares,
+ 'Cpuset': cpuset,
+ 'CpusetCpus': cpuset,
+ 'WorkingDir': working_dir,
+ 'MemorySwap': memswap_limit,
+ 'HostConfig': host_config,
+ 'NetworkingConfig': networking_config,
+ 'MacAddress': mac_address,
+ 'Labels': labels,
+ 'VolumeDriver': volume_driver,
+ 'StopSignal': stop_signal,
+ 'Healthcheck': healthcheck,
+ 'StopTimeout': stop_timeout,
+ 'Runtime': runtime
+ })
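
# Editor's demonstration (not part of the commit) of the normalization
# ContainerConfig performs above; '1.25' is just a sample API version.
from docker.types import ContainerConfig

config = ContainerConfig(
    version='1.25',
    image='busybox',
    command='echo hello',            # split_command -> ['echo', 'hello']
    ports=[8080, (53, 'udp')],       # -> {'8080/tcp': {}, '53/udp': {}}
    volumes='/data',                 # a string becomes a one-entry dict
    environment={'GREETING': 'hi'},  # dict -> ['GREETING=hi']
)
assert config['ExposedPorts'] == {'8080/tcp': {}, '53/udp': {}}
assert config['Volumes'] == {'/data': {}}
assert config['Env'] == ['GREETING=hi']
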
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
new file mode 100644
index 0000000..8ea9a35
--- /dev/null
+++ b/docker/types/healthcheck.py
@@ -0,0 +1,63 @@
+from .base import DictType
+
+import six
+
+
+class Healthcheck(DictType):
+ def __init__(self, **kwargs):
+ test = kwargs.get('test', kwargs.get('Test'))
+ if isinstance(test, six.string_types):
+ test = ["CMD-SHELL", test]
+
+ interval = kwargs.get('interval', kwargs.get('Interval'))
+ timeout = kwargs.get('timeout', kwargs.get('Timeout'))
+ retries = kwargs.get('retries', kwargs.get('Retries'))
+ start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
+
+ super(Healthcheck, self).__init__({
+ 'Test': test,
+ 'Interval': interval,
+ 'Timeout': timeout,
+ 'Retries': retries,
+ 'StartPeriod': start_period
+ })
+
+ @property
+ def test(self):
+ return self['Test']
+
+ @test.setter
+ def test(self, value):
+ self['Test'] = value
+
+ @property
+ def interval(self):
+ return self['Interval']
+
+ @interval.setter
+ def interval(self, value):
+ self['Interval'] = value
+
+ @property
+ def timeout(self):
+ return self['Timeout']
+
+ @timeout.setter
+ def timeout(self, value):
+ self['Timeout'] = value
+
+ @property
+ def retries(self):
+ return self['Retries']
+
+ @retries.setter
+ def retries(self, value):
+ self['Retries'] = value
+
+ @property
+ def start_period(self):
+ return self['StartPeriod']
+
+ @start_period.setter
+ def start_period(self, value):
+ self['StartPeriod'] = value
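
# Editor's demonstration (not part of the commit): Healthcheck above accepts
# snake_case or CamelCase keys and wraps a plain string test in CMD-SHELL;
# per the Docker API, durations are expressed in nanoseconds.
from docker.types import Healthcheck

hc = Healthcheck(
    test='curl -f http://localhost/ || exit 1',
    interval=30 * 10**9,             # 30s in nanoseconds
    timeout=5 * 10**9,
    retries=3,
)
assert hc.test == ['CMD-SHELL', 'curl -f http://localhost/ || exit 1']
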
diff --git a/docker/types/networks.py b/docker/types/networks.py
new file mode 100644
index 0000000..1c7b2c9
--- /dev/null
+++ b/docker/types/networks.py
@@ -0,0 +1,111 @@
+from .. import errors
+from ..utils import normalize_links, version_lt
+
+
+class EndpointConfig(dict):
+ def __init__(self, version, aliases=None, links=None, ipv4_address=None,
+ ipv6_address=None, link_local_ips=None):
+ if version_lt(version, '1.22'):
+ raise errors.InvalidVersion(
+ 'Endpoint config is not supported for API version < 1.22'
+ )
+
+ if aliases:
+ self["Aliases"] = aliases
+
+ if links:
+ self["Links"] = normalize_links(links)
+
+ ipam_config = {}
+ if ipv4_address:
+ ipam_config['IPv4Address'] = ipv4_address
+
+ if ipv6_address:
+ ipam_config['IPv6Address'] = ipv6_address
+
+ if link_local_ips is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'link_local_ips is not supported for API version < 1.24'
+ )
+ ipam_config['LinkLocalIPs'] = link_local_ips
+
+ if ipam_config:
+ self['IPAMConfig'] = ipam_config
+
+
+class NetworkingConfig(dict):
+ def __init__(self, endpoints_config=None):
+ if endpoints_config:
+ self["EndpointsConfig"] = endpoints_config
+
+
+class IPAMConfig(dict):
+ """
+ Create an IPAM (IP Address Management) config dictionary to be used with
+ :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
+
+ Args:
+
+ driver (str): The IPAM driver to use. Defaults to ``default``.
+ pool_configs (:py:class:`list`): A list of pool configurations
+ (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
+ options (dict): Driver options as a key-value dictionary.
+ Defaults to ``None``.
+
+ Example:
+
+ >>> ipam_config = docker.types.IPAMConfig(driver='default')
+ >>> network = client.create_network('network1', ipam=ipam_config)
+
+ """
+ def __init__(self, driver='default', pool_configs=None, options=None):
+ self.update({
+ 'Driver': driver,
+ 'Config': pool_configs or []
+ })
+
+ if options:
+ if not isinstance(options, dict):
+ raise TypeError('IPAMConfig options must be a dictionary')
+ self['Options'] = options
+
+
+class IPAMPool(dict):
+ """
+ Create an IPAM pool config dictionary to be added to the
+ ``pool_configs`` parameter of
+ :py:class:`~docker.types.IPAMConfig`.
+
+ Args:
+
+ subnet (str): Custom subnet for this IPAM pool using the CIDR
+ notation. Defaults to ``None``.
+ iprange (str): Custom IP range for endpoints in this IPAM pool using
+ the CIDR notation. Defaults to ``None``.
+ gateway (str): Custom IP address for the pool's gateway.
+ aux_addresses (dict): A dictionary of ``key -> ip_address``
+ relationships specifying auxiliary addresses that need to be
+ allocated by the IPAM driver.
+
+ Example:
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='124.42.0.0/16',
+ iprange='124.42.0.0/24',
+ gateway='124.42.0.254',
+ aux_addresses={
+ 'reserved1': '124.42.1.1'
+ }
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool])
+ """
+ def __init__(self, subnet=None, iprange=None, gateway=None,
+ aux_addresses=None):
+ self.update({
+ 'Subnet': subnet,
+ 'IPRange': iprange,
+ 'Gateway': gateway,
+ 'AuxiliaryAddresses': aux_addresses
+ })
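
# Editor's sketch (not part of the commit): combining the endpoint types above
# the way APIClient.create_container(networking_config=...) expects; the
# network name and address are illustrative.
from docker.types import EndpointConfig, NetworkingConfig

endpoint = EndpointConfig(
    version='1.24',
    aliases=['db'],
    ipv4_address='172.28.0.10',
)
networking_config = NetworkingConfig({'mynet': endpoint})
assert networking_config['EndpointsConfig']['mynet']['IPAMConfig'] == {
    'IPv4Address': '172.28.0.10'
}
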
diff --git a/docker/types/services.py b/docker/types/services.py
index 063779c..8411b70 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,21 +1,50 @@
import six
from .. import errors
+from ..constants import IS_WINDOWS_PLATFORM
+from ..utils import check_resource, format_environment, split_command
class TaskTemplate(dict):
+ """
+ Describe the task specification to be used when creating or updating a
+ service.
+
+ Args:
+
+ container_spec (ContainerSpec): Container settings for containers
+ started as part of this task.
+ log_driver (DriverConfig): Log configuration for containers created as
+ part of the service.
+ resources (Resources): Resource requirements which apply to each
+ individual container created as part of the service.
+ restart_policy (RestartPolicy): Specification for the restart policy
+ which applies to containers created as part of this service.
+ placement (Placement): Placement instructions for the scheduler.
+ If a list is passed instead, it is assumed to be a list of
+ constraints as part of a :py:class:`Placement` object.
+ force_update (int): A counter that triggers an update even if no
+ relevant parameters have been changed.
+ """
def __init__(self, container_spec, resources=None, restart_policy=None,
- placement=None, log_driver=None):
+ placement=None, log_driver=None, force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
if restart_policy:
self['RestartPolicy'] = restart_policy
if placement:
+ if isinstance(placement, list):
+ placement = Placement(constraints=placement)
self['Placement'] = placement
if log_driver:
self['LogDriver'] = log_driver
+ if force_update is not None:
+ if not isinstance(force_update, int):
+ raise TypeError('force_update must be an integer')
+ self['ForceUpdate'] = force_update
+
@property
def container_spec(self):
return self.get('ContainerSpec')
@@ -34,10 +63,32 @@ class TaskTemplate(dict):
class ContainerSpec(dict):
- def __init__(self, image, command=None, args=None, env=None, workdir=None,
- user=None, labels=None, mounts=None, stop_grace_period=None):
- from ..utils import split_command # FIXME: circular import
+ """
+ Describes the behavior of containers that are part of a task, and is used
+ when declaring a :py:class:`~docker.types.TaskTemplate`.
+
+ Args:
+
+ image (string): The image name to use for the container.
+ command (string or list): The command to be run in the image.
+ args (:py:class:`list`): Arguments to the command.
+ hostname (string): The hostname to set on the container.
+ env (dict): Environment variables.
+ workdir (string): The working directory for commands to run in.
+ user (string): The user inside the container.
+ labels (dict): A map of labels to associate with the service.
+ mounts (:py:class:`list`): A list of specifications for mounts to be
+ added to containers created as part of the service. See the
+ :py:class:`~docker.types.Mount` class for details.
+ stop_grace_period (int): Amount of time to wait for the container to
+ terminate before forcefully killing it.
+ secrets (list of :py:class:`SecretReference`): List of secrets to be
+ made available inside the containers.
+ tty (boolean): Whether a pseudo-TTY should be allocated.
+ """
+ def __init__(self, image, command=None, args=None, hostname=None, env=None,
+ workdir=None, user=None, labels=None, mounts=None,
+ stop_grace_period=None, secrets=None, tty=None):
self['Image'] = image
if isinstance(command, six.string_types):
@@ -45,8 +96,13 @@ class ContainerSpec(dict):
self['Command'] = command
self['Args'] = args
+ if hostname is not None:
+ self['Hostname'] = hostname
if env is not None:
- self['Env'] = env
+ if isinstance(env, dict):
+ self['Env'] = format_environment(env)
+ else:
+ self['Env'] = env
if workdir is not None:
self['Dir'] = workdir
if user is not None:
@@ -54,26 +110,60 @@ class ContainerSpec(dict):
if labels is not None:
self['Labels'] = labels
if mounts is not None:
+ parsed_mounts = []
for mount in mounts:
if isinstance(mount, six.string_types):
- mounts.append(Mount.parse_mount_string(mount))
- mounts.remove(mount)
- self['Mounts'] = mounts
+ parsed_mounts.append(Mount.parse_mount_string(mount))
+ else:
+ # Already a Mount object (or compatible dict); keep as-is
+ parsed_mounts.append(mount)
+ self['Mounts'] = parsed_mounts
if stop_grace_period is not None:
self['StopGracePeriod'] = stop_grace_period
+ if secrets is not None:
+ if not isinstance(secrets, list):
+ raise TypeError('secrets must be a list')
+ self['Secrets'] = secrets
+
+ if tty is not None:
+ self['TTY'] = tty
+
class Mount(dict):
+ """
+ Describes a mounted folder's configuration inside a container. A list of
+ :py:class:`Mount` would be used as part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ target (string): Container path.
+ source (string): Mount source (e.g. a volume name or a host path).
+ type (string): The mount type (``bind`` or ``volume``).
+ Default: ``volume``.
+ read_only (bool): Whether the mount should be read-only.
+ propagation (string): A propagation mode with the value ``[r]private``,
+ ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
+ no_copy (bool): If ``True``, do not populate the volume with data
+ from the target. Default: ``False``. Only valid for the ``volume``
+ type.
+ labels (dict): User-defined name and labels for the volume. Only valid
+ for the ``volume`` type.
+ driver_config (DriverConfig): Volume driver configuration. Only valid
+ for the ``volume`` type.
+ """
def __init__(self, target, source, type='volume', read_only=False,
propagation=None, no_copy=False, labels=None,
driver_config=None):
self['Target'] = target
self['Source'] = source
if type not in ('bind', 'volume'):
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'Only acceptable mount types are `bind` and `volume`.'
)
self['Type'] = type
+ self['ReadOnly'] = read_only
if type == 'bind':
if propagation is not None:
@@ -81,7 +171,7 @@ class Mount(dict):
'Propagation': propagation
}
if any([labels, driver_config, no_copy]):
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'Mount type is binding but volume options have been '
'provided.'
)
@@ -92,11 +182,11 @@ class Mount(dict):
if labels:
volume_opts['Labels'] = labels
if driver_config:
- volume_opts['driver_config'] = driver_config
+ volume_opts['DriverConfig'] = driver_config
if volume_opts:
self['VolumeOptions'] = volume_opts
if propagation:
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'Mount type is volume but `propagation` argument has been '
'provided.'
)
@@ -105,19 +195,39 @@ class Mount(dict):
def parse_mount_string(cls, string):
parts = string.split(':')
if len(parts) > 3:
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'Invalid mount format "{0}"'.format(string)
)
if len(parts) == 1:
- return cls(target=parts[0])
+ return cls(target=parts[0], source=None)
else:
target = parts[1]
source = parts[0]
- read_only = not (len(parts) == 3 or parts[2] == 'ro')
- return cls(target, source, read_only=read_only)
+ mount_type = 'volume'
+ if source.startswith('/') or (
+ IS_WINDOWS_PLATFORM and source[0].isalpha() and
+ source[1] == ':'
+ ):
+ # FIXME: That windows condition will fail earlier since we
+ # split on ':'. We should look into doing a smarter split
+ # if we detect we are on Windows.
+ mount_type = 'bind'
+ read_only = not (len(parts) == 2 or parts[2] == 'rw')
+ return cls(target, source, read_only=read_only, type=mount_type)
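
# Editor's demonstration (not part of the commit) of the corrected
# parse_mount_string() above, on a POSIX host (the Windows drive-letter case
# is still broken, as the FIXME notes).
from docker.types import Mount

m = Mount.parse_mount_string('/srv/data:/data:ro')
assert m['Type'] == 'bind'           # source starts with '/' -> bind mount
assert m['ReadOnly'] is True         # 'ro' flag; 'rw' or no flag means writable
m = Mount.parse_mount_string('myvolume:/data')
assert m['Type'] == 'volume' and m['ReadOnly'] is False
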
class Resources(dict):
+ """
+ Configures resource allocation for containers when made part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ cpu_limit (int): CPU limit in units of 10^9 CPU shares.
+ mem_limit (int): Memory limit in Bytes.
+ cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
+ mem_reservation (int): Memory reservation in Bytes.
+ """
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None):
limits = {}
@@ -138,16 +248,49 @@ class Resources(dict):
class UpdateConfig(dict):
- def __init__(self, parallelism=0, delay=None, failure_action='continue'):
+ """
+ Used to specify the way container updates should be performed by a service.
+
+ Args:
+
+ parallelism (int): Maximum number of tasks to be updated in one
+ iteration (0 means unlimited parallelism). Default: 0.
+ delay (int): Amount of time between updates.
+ failure_action (string): Action to take if an updated task fails to
+ run, or stops running during the update. Acceptable values are
+ ``continue`` and ``pause``. Default: ``continue``
+ monitor (int): Amount of time to monitor each updated task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ an update before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+ """
+ def __init__(self, parallelism=0, delay=None, failure_action='continue',
+ monitor=None, max_failure_ratio=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
if failure_action not in ('pause', 'continue'):
- raise errors.DockerError(
+ raise errors.InvalidArgument(
'failure_action must be either `pause` or `continue`.'
)
self['FailureAction'] = failure_action
+ if monitor is not None:
+ if not isinstance(monitor, int):
+ raise TypeError('monitor must be an integer')
+ self['Monitor'] = monitor
+
+ if max_failure_ratio is not None:
+ if not isinstance(max_failure_ratio, (float, int)):
+ raise TypeError('max_failure_ratio must be a float')
+ if max_failure_ratio > 1 or max_failure_ratio < 0:
+ raise errors.InvalidArgument(
+ 'max_failure_ratio must be a number between 0 and 1'
+ )
+ self['MaxFailureRatio'] = max_failure_ratio
+
class RestartConditionTypesEnum(object):
_values = (
@@ -159,6 +302,21 @@ class RestartConditionTypesEnum(object):
class RestartPolicy(dict):
+ """
+ Used when creating a :py:class:`~docker.types.ContainerSpec`,
+ dictates whether a container should restart after stopping or failing.
+
+ Args:
+
+ condition (string): Condition for restart (``none``, ``on-failure``,
+ or ``any``). Default: ``none``.
+ delay (int): Delay between restart attempts. Default: 0
+ max_attempts (int): Maximum attempts to restart a given container
+ before giving up. Default value is 0, which is ignored.
+ window (int): Time window used to evaluate the restart policy. Default
+ value is 0, which is unbounded.
+ """
+
condition_types = RestartConditionTypesEnum
def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
@@ -175,7 +333,153 @@ class RestartPolicy(dict):
class DriverConfig(dict):
+ """
+ Indicates which driver to use, as well as its configuration. Can be used
+ as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
+ and for the ``driver_config`` in a volume
+ :py:class:`~docker.types.Mount`.
+
+ Args:
+
+ name (string): Name of the driver to use.
+ options (dict): Driver-specific options. Default: ``None``.
+ """
def __init__(self, name, options=None):
self['Name'] = name
if options:
self['Options'] = options
+
+
+class EndpointSpec(dict):
+ """
+ Describes properties to access and load-balance a service.
+
+ Args:
+
+ mode (string): The mode of resolution to use for internal load
+ balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
+ ``'vip'`` if not provided.
+ ports (dict): Exposed ports that this service is accessible on from the
+ outside, in the form of ``{ target_port: published_port }`` or
+ ``{ target_port: (published_port, protocol) }``. Ports can only be
+ provided if the ``vip`` resolution mode is used.
+ """
+ def __init__(self, mode=None, ports=None):
+ if ports:
+ self['Ports'] = convert_service_ports(ports)
+ if mode:
+ self['Mode'] = mode
+
+
+def convert_service_ports(ports):
+ if isinstance(ports, list):
+ return ports
+ if not isinstance(ports, dict):
+ raise TypeError(
+ 'Invalid type for ports, expected dict or list'
+ )
+
+ result = []
+ for k, v in six.iteritems(ports):
+ port_spec = {
+ 'Protocol': 'tcp',
+ 'PublishedPort': k
+ }
+
+ if isinstance(v, tuple):
+ port_spec['TargetPort'] = v[0]
+ if len(v) == 2:
+ port_spec['Protocol'] = v[1]
+ else:
+ port_spec['TargetPort'] = v
+
+ result.append(port_spec)
+ return result
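
# Editor's demonstration (not part of the commit): dict keys are the published
# (external) ports, values the target (container) ports, optionally paired
# with a protocol.
from docker.types.services import convert_service_ports

assert convert_service_ports({8080: 80}) == [
    {'Protocol': 'tcp', 'PublishedPort': 8080, 'TargetPort': 80}
]
assert convert_service_ports({8053: (53, 'udp')}) == [
    {'Protocol': 'udp', 'PublishedPort': 8053, 'TargetPort': 53}
]
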
+
+
+class ServiceMode(dict):
+ """
+ Indicates whether a service should be deployed as a replicated or global
+ service, along with its associated parameters.
+
+ Args:
+ mode (string): Can be either ``replicated`` or ``global``
+ replicas (int): Number of replicas. For replicated services only.
+ """
+ def __init__(self, mode, replicas=None):
+ if mode not in ('replicated', 'global'):
+ raise errors.InvalidArgument(
+ 'mode must be either "replicated" or "global"'
+ )
+ if mode != 'replicated' and replicas is not None:
+ raise errors.InvalidArgument(
+ 'replicas can only be used for replicated mode'
+ )
+ self[mode] = {}
+ if replicas is not None:
+ self[mode]['Replicas'] = replicas
+
+ @property
+ def mode(self):
+ if 'global' in self:
+ return 'global'
+ return 'replicated'
+
+ @property
+ def replicas(self):
+ if self.mode != 'replicated':
+ return None
+ return self['replicated'].get('Replicas')
+
+
+class SecretReference(dict):
+ """
+ Secret reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a secret is made accessible inside the service's
+ containers.
+
+ Args:
+ secret_id (string): Secret's ID
+ secret_name (string): Secret's name as defined at its creation.
+ filename (string): Name of the file containing the secret. Defaults
+ to the secret's name if not specified.
+ uid (string): UID of the secret file's owner. Default: 0
+ gid (string): GID of the secret file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource('secret_id')
+ def __init__(self, secret_id, secret_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['SecretName'] = secret_name
+ self['SecretID'] = secret_id
+ self['File'] = {
+ 'Name': filename or secret_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
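A sketch of SecretReference with hypothetical ID and name values:

    from docker.types import SecretReference

    ref = SecretReference('abc123', 'db_password',
                          filename='password.txt', mode=0o400)
    # ref['File'] == {'Name': 'password.txt', 'UID': '0', 'GID': '0',
    #                 'Mode': 0o400}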
+
+
+class Placement(dict):
+ """
+ Placement constraints to be used as part of a :py:class:`TaskTemplate`.
+
+ Args:
+ constraints (list): A list of constraints.
+ preferences (list): Preferences provide a way to make the
+ scheduler aware of factors such as topology. They are provided
+ in order from highest to lowest precedence.
+ platforms (list): A list of platforms expressed as ``(arch, os)``
+ tuples.
+ """
+ def __init__(self, constraints=None, preferences=None, platforms=None):
+ if constraints is not None:
+ self['Constraints'] = constraints
+ if preferences is not None:
+ self['Preferences'] = preferences
+ if platforms:
+ self['Platforms'] = []
+ for plat in platforms:
+ self['Platforms'].append({
+ 'Architecture': plat[0], 'OS': plat[1]
+ })
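A sketch of Placement with example constraint and platform values, assuming it is exported from docker.types like the other service types:

    from docker.types import Placement

    placement = Placement(
        constraints=['node.role == worker'],
        platforms=[('x86_64', 'linux')]
    )
    # -> {'Constraints': ['node.role == worker'],
    #     'Platforms': [{'Architecture': 'x86_64', 'OS': 'linux'}]}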
diff --git a/docker/types/swarm.py b/docker/types/swarm.py
index 865fde6..49beaa1 100644
--- a/docker/types/swarm.py
+++ b/docker/types/swarm.py
@@ -8,8 +8,11 @@ class SwarmSpec(dict):
self['Orchestration'] = {
'TaskHistoryRetentionLimit': task_history_retention_limit
}
- if any([snapshot_interval, keep_old_snapshots,
- log_entries_for_slow_followers, heartbeat_tick, election_tick]):
+ if any([snapshot_interval,
+ keep_old_snapshots,
+ log_entries_for_slow_followers,
+ heartbeat_tick,
+ election_tick]):
self['Raft'] = {
'SnapshotInterval': snapshot_interval,
'KeepOldSnapshots': keep_old_snapshots,
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 4bb3876..b758cbd 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,13 +1,13 @@
# flake8: noqa
+from .build import tar, exclude_paths
+from .decorators import check_resource, minimum_version, update_headers
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
- mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
+ mkbuildcontext, parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, datetime_to_timestamp,
- create_host_config, create_container_config, parse_bytes, ping_registry,
- parse_env_file, version_lt, version_gte, decode_json_header, split_command,
- create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
+ create_host_config, parse_bytes, ping_registry, parse_env_file, version_lt,
+ version_gte, decode_json_header, split_command, create_ipam_config,
+ create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
+ format_environment, create_archive
)
-from ..types import LogConfig, Ulimit
-from ..types import SwarmExternalCA, SwarmSpec
-from .decorators import check_resource, minimum_version, update_headers
diff --git a/docker/utils/build.py b/docker/utils/build.py
new file mode 100644
index 0000000..79b7249
--- /dev/null
+++ b/docker/utils/build.py
@@ -0,0 +1,142 @@
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+from .fnmatch import fnmatch
+from .utils import create_archive
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
+ root=root, fileobj=fileobj, gzip=gzip
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+ an iterator of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ exceptions = [p for p in patterns if p.startswith('!')]
+
+ include_patterns = [p[1:] for p in exceptions]
+ include_patterns += [dockerfile, '.dockerignore']
+
+ exclude_patterns = list(set(patterns) - set(exceptions))
+
+ paths = get_paths(root, exclude_patterns, include_patterns,
+ has_exceptions=len(exceptions) > 0)
+
+ return set(paths).union(
+ # If the Dockerfile is in a subdirectory that is excluded, get_paths
+ # will not descend into it and the file will be skipped. This ensures
+ # it doesn't happen.
+ set([dockerfile.replace('/', os.path.sep)])
+ if os.path.exists(os.path.join(root, dockerfile)) else set()
+ )
+
+
+def should_include(path, exclude_patterns, include_patterns):
+ """
+ Given a path, a list of exclude patterns, and a list of inclusion patterns:
+
+ 1. Returns True if the path doesn't match any exclusion pattern
+ 2. Returns False if the path matches an exclusion pattern and doesn't match
+ an inclusion pattern
+ 3. Returns True if the path matches an exclusion pattern and also matches
+ an inclusion pattern
+ """
+ for pattern in exclude_patterns:
+ if match_path(path, pattern):
+ for include_pattern in include_patterns:
+ if match_path(path, include_pattern):
+ return True
+ return False
+ return True
+
+
+def should_check_directory(directory_path, exclude_patterns, include_patterns):
+ """
+ Given a directory path, a list of exclude patterns, and a list of inclusion
+ patterns:
+
+ 1. Returns True if the directory path should be included according to
+ should_include.
+ 2. Returns True if the directory path is the prefix for an inclusion
+ pattern
+ 3. Returns False otherwise
+ """
+
+ # To account for exception rules, check directories if their path is a
+ # prefix to an inclusion pattern. This logic conforms with the current
+ # docker logic (2016-10-27):
+ # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
+
+ def normalize_path(path):
+ return path.replace(os.path.sep, '/')
+
+ path_with_slash = normalize_path(directory_path) + '/'
+ possible_child_patterns = [
+ pattern for pattern in map(normalize_path, include_patterns)
+ if (pattern + '/').startswith(path_with_slash)
+ ]
+ directory_included = should_include(
+ directory_path, exclude_patterns, include_patterns
+ )
+ return directory_included or len(possible_child_patterns) > 0
+
+
+def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
+ paths = []
+
+ for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
+ parent = os.path.relpath(parent, root)
+ if parent == '.':
+ parent = ''
+
+ # Remove excluded patterns from the list of directories to traverse
+ # by mutating the dirs we're iterating over.
+ # This looks strange, but is considered the correct way to skip
+ # traversal. See https://docs.python.org/2/library/os.html#os.walk
+ dirs[:] = [
+ d for d in dirs if should_check_directory(
+ os.path.join(parent, d), exclude_patterns, include_patterns
+ )
+ ]
+
+ for path in dirs:
+ if should_include(os.path.join(parent, path),
+ exclude_patterns, include_patterns):
+ paths.append(os.path.join(parent, path))
+
+ for path in files:
+ if should_include(os.path.join(parent, path),
+ exclude_patterns, include_patterns):
+ paths.append(os.path.join(parent, path))
+
+ return paths
+
+
+def match_path(path, pattern):
+ pattern = pattern.rstrip('/' + os.path.sep)
+ if pattern:
+ pattern = os.path.relpath(pattern)
+
+ pattern_components = pattern.split(os.path.sep)
+ if len(pattern_components) == 1 and IS_WINDOWS_PLATFORM:
+ pattern_components = pattern.split('/')
+
+ if '**' not in pattern:
+ path_components = path.split(os.path.sep)[:len(pattern_components)]
+ else:
+ path_components = path.split(os.path.sep)
+ return fnmatch('/'.join(path_components), '/'.join(pattern_components))
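To see the new module end to end, a small self-contained sketch (the directory layout created here is a throwaway example):

    import os
    import tempfile

    from docker.utils.build import exclude_paths

    root = tempfile.mkdtemp()
    os.mkdir(os.path.join(root, 'node_modules'))
    for name in ('Dockerfile', 'app.py',
                 os.path.join('node_modules', 'dep.js')):
        open(os.path.join(root, name), 'w').close()

    # 'node_modules' is pruned during the walk; the Dockerfile itself
    # is always kept, as documented in exclude_paths above.
    paths = exclude_paths(root, ['node_modules'])
    assert paths == {'Dockerfile', 'app.py'}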
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 2fe880c..5e195c0 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -4,22 +4,21 @@ from .. import errors
from . import utils
-def check_resource(f):
- @functools.wraps(f)
- def wrapped(self, resource_id=None, *args, **kwargs):
- if resource_id is None:
- if kwargs.get('container'):
- resource_id = kwargs.pop('container')
- elif kwargs.get('image'):
- resource_id = kwargs.pop('image')
- if isinstance(resource_id, dict):
- resource_id = resource_id.get('Id', resource_id.get('ID'))
- if not resource_id:
- raise errors.NullResource(
- 'image or container param is undefined'
- )
- return f(self, resource_id, *args, **kwargs)
- return wrapped
+def check_resource(resource_name):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None and kwargs.get(resource_name):
+ resource_id = kwargs.pop(resource_name)
+ if isinstance(resource_id, dict):
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
+ if not resource_id:
+ raise errors.NullResource(
+ 'Resource ID was not provided'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
+ return decorator
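A sketch of the new parameterized decorator on a stand-in class (FakeAPI is hypothetical, purely for illustration):

    from docker.utils.decorators import check_resource

    class FakeAPI(object):
        @check_resource('container')
        def inspect_container(self, container):
            return container  # receives the bare ID string

    api = FakeAPI()
    # A dict with an 'Id' key is unwrapped; a keyword arg is promoted
    assert api.inspect_container({'Id': 'abc123'}) == 'abc123'
    assert api.inspect_container(container='abc123') == 'abc123'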
def minimum_version(version):
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
new file mode 100644
index 0000000..e95b63c
--- /dev/null
+++ b/docker/utils/fnmatch.py
@@ -0,0 +1,103 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case in account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized (unconditionally
+ in this vendored copy, regardless of the operating system).
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ name = name.lower()
+ pat = pat.lower()
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+ recursive_mode = False
+ i, n = 0, len(pat)
+ res = ''
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ recursive_mode = True
+ i = i + 1
+ res = res + '.*'
+ elif c == '?':
+ res = res + '.'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ elif recursive_mode and c == '/':
+ res = res + re.escape(c) + '?'
+ else:
+ res = res + re.escape(c)
+ return res + '\Z(?ms)'
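A couple of examples of the '**' extension this vendored copy adds over the stdlib fnmatch:

    from docker.utils.fnmatch import fnmatch

    # '**' matches across path separators, and the '/' following it
    # becomes optional (see the recursive_mode branch above).
    assert fnmatch('foo/baz/bar', 'foo/**/bar')
    assert fnmatch('foo/bar', 'foo/**/bar')
    assert not fnmatch('foo/bar/baz', '*.py')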
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
new file mode 100644
index 0000000..addffdf
--- /dev/null
+++ b/docker/utils/json_stream.py
@@ -0,0 +1,80 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import json.decoder
+
+import six
+
+from ..errors import StreamParseError
+
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """
+ Given a stream of bytes or text, if any of the items in the stream
+ are bytes, convert them to text.
+ This function can be removed once we return text streams
+ instead of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, six.text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(six.text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = six.text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e)
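A short sketch of the re-assembly this module performs: chunks may split a JSON object anywhere, and json_stream still yields whole objects:

    from docker.utils.json_stream import json_stream

    # Simulates an inconsistently buffered HTTP response body
    chunks = ['{"stream": "Step 1/2"}\n{"str', 'eam": "Step 2/2"}\n']
    assert list(json_stream(chunks)) == [
        {'stream': 'Step 1/2'},
        {'stream': 'Step 2/2'},
    ]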
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
new file mode 100644
index 0000000..bf7d697
--- /dev/null
+++ b/docker/utils/ports.py
@@ -0,0 +1,83 @@
+import re
+
+PORT_SPEC = re.compile(
+ "^" # Match full string
+ "(" # External part
+ "((?P<host>[a-fA-F\d.:]+):)?" # Address
+ "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ ")?"
+ "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp))?" # Protocol
+ "$" # Match full string
+)
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def _raise_invalid_port(port):
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]' % port)
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+ if not start:
+ return start
+ if not end:
+ return [start + proto]
+ if randomly_available_port:
+ return ['{}-{}'.format(start, end) + proto]
+ return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
+ port = str(port)
+ match = PORT_SPEC.match(port)
+ if match is None:
+ _raise_invalid_port(port)
+ parts = match.groupdict()
+
+ host = parts['host']
+ proto = parts['proto'] or ''
+ internal = port_range(parts['int'], parts['int_end'], proto)
+ external = port_range(
+ parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+ if host is None:
+ if external is not None and len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, external
+ else:
+ if not external:
+ external = [None] * len(internal)
+ elif len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, [(host, ext_port) for ext_port in external]
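A few examples of what the regex-based split_port returns, following the internal/external tuple convention above:

    from docker.utils.ports import split_port

    split_port('8080:80')
    # -> (['80'], ['8080'])
    split_port('127.0.0.1:8080:80/udp')
    # -> (['80/udp'], [('127.0.0.1', '8080')])
    split_port('80')
    # -> (['80'], None)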
diff --git a/docker/utils/ports/__init__.py b/docker/utils/ports/__init__.py
deleted file mode 100644
index 1dbfa3a..0000000
--- a/docker/utils/ports/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .ports import (
- split_port,
- build_port_bindings
-) # flake8: noqa
diff --git a/docker/utils/ports/ports.py b/docker/utils/ports/ports.py
deleted file mode 100644
index 326ef94..0000000
--- a/docker/utils/ports/ports.py
+++ /dev/null
@@ -1,92 +0,0 @@
-
-def add_port_mapping(port_bindings, internal_port, external):
- if internal_port in port_bindings:
- port_bindings[internal_port].append(external)
- else:
- port_bindings[internal_port] = [external]
-
-
-def add_port(port_bindings, internal_port_range, external_range):
- if external_range is None:
- for internal_port in internal_port_range:
- add_port_mapping(port_bindings, internal_port, None)
- else:
- ports = zip(internal_port_range, external_range)
- for internal_port, external_port in ports:
- add_port_mapping(port_bindings, internal_port, external_port)
-
-
-def build_port_bindings(ports):
- port_bindings = {}
- for port in ports:
- internal_port_range, external_range = split_port(port)
- add_port(port_bindings, internal_port_range, external_range)
- return port_bindings
-
-
-def to_port_range(port):
- if not port:
- return None
-
- protocol = ""
- if "/" in port:
- parts = port.split("/")
- if len(parts) != 2:
- _raise_invalid_port(port)
-
- port, protocol = parts
- protocol = "/" + protocol
-
- parts = str(port).split('-')
-
- if len(parts) == 1:
- return ["%s%s" % (port, protocol)]
-
- if len(parts) == 2:
- full_port_range = range(int(parts[0]), int(parts[1]) + 1)
- return ["%s%s" % (p, protocol) for p in full_port_range]
-
- raise ValueError('Invalid port range "%s", should be '
- 'port or startport-endport' % port)
-
-
-def _raise_invalid_port(port):
- raise ValueError('Invalid port "%s", should be '
- '[[remote_ip:]remote_port[-remote_port]:]'
- 'port[/protocol]' % port)
-
-
-def split_port(port):
- parts = str(port).split(':')
-
- if not 1 <= len(parts) <= 3:
- _raise_invalid_port(port)
-
- if len(parts) == 1:
- internal_port, = parts
- return to_port_range(internal_port), None
- if len(parts) == 2:
- external_port, internal_port = parts
-
- internal_range = to_port_range(internal_port)
- external_range = to_port_range(external_port)
-
- if internal_range is None or external_range is None:
- _raise_invalid_port(port)
-
- if len(internal_range) != len(external_range):
- raise ValueError('Port ranges don\'t match in length')
-
- return internal_range, external_range
-
- external_ip, external_port, internal_port = parts
- internal_range = to_port_range(internal_port)
- external_range = to_port_range(external_port)
- if not external_range:
- external_range = [None] * len(internal_range)
-
- if len(internal_range) != len(external_range):
- raise ValueError('Port ranges don\'t match in length')
-
- return internal_range, [(external_ip, ex_port or None)
- for ex_port in external_range]
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 164b845..4080f25 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -69,7 +69,11 @@ def frames_iter(socket):
"""
Returns a generator of frames read from socket
"""
- n = next_frame_size(socket)
- while n > 0:
- yield read(socket, n)
+ while True:
n = next_frame_size(socket)
+ if n == 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ n -= len(result)
+ yield result
diff --git a/docker/utils/types.py b/docker/utils/types.py
deleted file mode 100644
index 8098c47..0000000
--- a/docker/utils/types.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Compatibility module. See https://github.com/docker/docker-py/issues/1196
-
-import warnings
-
-from ..types import Ulimit, LogConfig # flake8: noqa
-
-warnings.warn('docker.utils.types is now docker.types', ImportWarning)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 8d55b57..d9a6d7c 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -9,7 +9,6 @@ import tempfile
import warnings
from distutils.version import StrictVersion
from datetime import datetime
-from fnmatch import fnmatch
import requests
import six
@@ -17,7 +16,6 @@ import six
from .. import constants
from .. import errors
from .. import tls
-from ..types import Ulimit, LogConfig
if six.PY2:
from urllib import splitnport
@@ -36,21 +34,18 @@ BYTE_UNITS = {
}
-def create_ipam_pool(subnet=None, iprange=None, gateway=None,
- aux_addresses=None):
- return {
- 'Subnet': subnet,
- 'IPRange': iprange,
- 'Gateway': gateway,
- 'AuxiliaryAddresses': aux_addresses
- }
+def create_ipam_pool(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_pool has been removed. Please use a '
+ 'docker.types.IPAMPool object instead.'
+ )
-def create_ipam_config(driver='default', pool_configs=None):
- return {
- 'Driver': driver,
- 'Config': pool_configs or []
- }
+def create_ipam_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_config has been removed. Please use a '
+ 'docker.types.IPAMConfig object instead.'
+ )
def mkbuildcontext(dockerfile):
@@ -83,122 +78,48 @@ def decode_json_header(header):
return json.loads(data)
-def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False):
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ for path in files:
+ i = t.gettarinfo(os.path.join(root, path), arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
- root = os.path.abspath(path)
- exclude = exclude or []
-
- for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
- t.add(os.path.join(root, path), arcname=path, recursive=False)
+ if constants.IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+ try:
+ # We open the file object in binary mode for Windows support.
+ with open(os.path.join(root, path), 'rb') as f:
+ t.addfile(i, f)
+ except IOError:
+ # When we encounter a directory the file object is set to None.
+ t.addfile(i, None)
t.close()
fileobj.seek(0)
return fileobj
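A minimal sketch of the refactored archive helper (the directory contents here are placeholders):

    import os
    import tempfile

    from docker.utils import create_archive

    root = tempfile.mkdtemp()
    with open(os.path.join(root, 'Dockerfile'), 'w') as f:
        f.write('FROM busybox\n')

    # With files=None the whole tree is walked via build_file_list()
    archive = create_archive(root)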
-def exclude_paths(root, patterns, dockerfile=None):
- """
- Given a root directory path and a list of .dockerignore patterns, return
- an iterator of all paths (both regular files and directories) in the root
- directory that do *not* match any of the patterns.
-
- All paths returned are relative to the root.
- """
- if dockerfile is None:
- dockerfile = 'Dockerfile'
-
- exceptions = [p for p in patterns if p.startswith('!')]
-
- include_patterns = [p[1:] for p in exceptions]
- include_patterns += [dockerfile, '.dockerignore']
-
- exclude_patterns = list(set(patterns) - set(exceptions))
-
- paths = get_paths(root, exclude_patterns, include_patterns,
- has_exceptions=len(exceptions) > 0)
-
- return set(paths).union(
- # If the Dockerfile is in a subdirectory that is excluded, get_paths
- # will not descend into it and the file will be skipped. This ensures
- # it doesn't happen.
- set([dockerfile])
- if os.path.exists(os.path.join(root, dockerfile)) else set()
- )
-
-
-def should_include(path, exclude_patterns, include_patterns):
- """
- Given a path, a list of exclude patterns, and a list of inclusion patterns:
-
- 1. Returns True if the path doesn't match any exclusion pattern
- 2. Returns False if the path matches an exclusion pattern and doesn't match
- an inclusion pattern
- 3. Returns true if the path matches an exclusion pattern and matches an
- inclusion pattern
- """
- for pattern in exclude_patterns:
- if match_path(path, pattern):
- for pattern in include_patterns:
- if match_path(path, pattern):
- return True
- return False
- return True
-
-
-def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
- paths = []
-
- for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
- parent = os.path.relpath(parent, root)
- if parent == '.':
- parent = ''
-
- # If exception rules exist, we can't skip recursing into ignored
- # directories, as we need to look for exceptions in them.
- #
- # It may be possible to optimize this further for exception patterns
- # that *couldn't* match within ignored directores.
- #
- # This matches the current docker logic (as of 2015-11-24):
- # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
-
- if not has_exceptions:
-
- # Remove excluded patterns from the list of directories to traverse
- # by mutating the dirs we're iterating over.
- # This looks strange, but is considered the correct way to skip
- # traversal. See https://docs.python.org/2/library/os.html#os.walk
-
- dirs[:] = [d for d in dirs if
- should_include(os.path.join(parent, d),
- exclude_patterns, include_patterns)]
-
- for path in dirs:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- for path in files:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- return paths
-
-
-def match_path(path, pattern):
- pattern = pattern.rstrip('/')
- if pattern:
- pattern = os.path.relpath(pattern)
-
- pattern_components = pattern.split(os.path.sep)
- path_components = path.split(os.path.sep)[:len(pattern_components)]
- return fnmatch('/'.join(path_components), pattern)
-
-
def compare_version(v1, v2):
"""Compare docker versions
@@ -361,6 +282,20 @@ def convert_tmpfs_mounts(tmpfs):
return result
+def convert_service_networks(networks):
+ if not networks:
+ return networks
+ if not isinstance(networks, list):
+ raise TypeError('networks parameter must be a list.')
+
+ result = []
+ for n in networks:
+ if isinstance(n, six.string_types):
+ n = {'Target': n}
+ result.append(n)
+ return result
+
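A sketch of the normalization convert_service_networks applies (network names are examples):

    from docker.utils import convert_service_networks

    # Bare strings are wrapped; dicts pass through unchanged
    convert_service_networks(['backend', {'Target': 'frontend'}])
    # -> [{'Target': 'backend'}, {'Target': 'frontend'}]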
+
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
@@ -576,330 +511,6 @@ def parse_bytes(s):
return s
-def host_config_type_error(param, param_value, expected):
- error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
- return TypeError(error_msg.format(param, expected, type(param_value)))
-
-
-def host_config_version_error(param, version, less_than=True):
- operator = '<' if less_than else '>'
- error_msg = '{0} param is not supported in API versions {1} {2}'
- return errors.InvalidVersion(error_msg.format(param, operator, version))
-
-
-def host_config_value_error(param, param_value):
- error_msg = 'Invalid value for {0} param: {1}'
- return ValueError(error_msg.format(param, param_value))
-
-
-def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=False, links=None, privileged=False,
- dns=None, dns_search=None, volumes_from=None,
- network_mode=None, restart_policy=None, cap_add=None,
- cap_drop=None, devices=None, extra_hosts=None,
- read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None, log_config=None,
- mem_limit=None, memswap_limit=None,
- mem_reservation=None, kernel_memory=None,
- mem_swappiness=None, cgroup_parent=None,
- group_add=None, cpu_quota=None,
- cpu_period=None, blkio_weight=None,
- blkio_weight_device=None, device_read_bps=None,
- device_write_bps=None, device_read_iops=None,
- device_write_iops=None, oom_kill_disable=False,
- shm_size=None, sysctls=None, version=None, tmpfs=None,
- oom_score_adj=None, dns_opt=None, cpu_shares=None,
- cpuset_cpus=None, userns_mode=None, pids_limit=None):
-
- host_config = {}
-
- if not version:
- warnings.warn(
- 'docker.utils.create_host_config() is deprecated. Please use '
- 'Client.create_host_config() instead.'
- )
- version = constants.DEFAULT_DOCKER_API_VERSION
-
- if mem_limit is not None:
- host_config['Memory'] = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- host_config['MemorySwap'] = parse_bytes(memswap_limit)
-
- if mem_reservation:
- if version_lt(version, '1.21'):
- raise host_config_version_error('mem_reservation', '1.21')
-
- host_config['MemoryReservation'] = parse_bytes(mem_reservation)
-
- if kernel_memory:
- if version_lt(version, '1.21'):
- raise host_config_version_error('kernel_memory', '1.21')
-
- host_config['KernelMemory'] = parse_bytes(kernel_memory)
-
- if mem_swappiness is not None:
- if version_lt(version, '1.20'):
- raise host_config_version_error('mem_swappiness', '1.20')
- if not isinstance(mem_swappiness, int):
- raise host_config_type_error(
- 'mem_swappiness', mem_swappiness, 'int'
- )
-
- host_config['MemorySwappiness'] = mem_swappiness
-
- if shm_size is not None:
- if isinstance(shm_size, six.string_types):
- shm_size = parse_bytes(shm_size)
-
- host_config['ShmSize'] = shm_size
-
- if pid_mode not in (None, 'host'):
- raise host_config_value_error('pid_mode', pid_mode)
- elif pid_mode:
- host_config['PidMode'] = pid_mode
-
- if ipc_mode:
- host_config['IpcMode'] = ipc_mode
-
- if privileged:
- host_config['Privileged'] = privileged
-
- if oom_kill_disable:
- if version_lt(version, '1.20'):
- raise host_config_version_error('oom_kill_disable', '1.19')
-
- host_config['OomKillDisable'] = oom_kill_disable
-
- if oom_score_adj:
- if version_lt(version, '1.22'):
- raise host_config_version_error('oom_score_adj', '1.22')
- if not isinstance(oom_score_adj, int):
- raise host_config_type_error(
- 'oom_score_adj', oom_score_adj, 'int'
- )
- host_config['OomScoreAdj'] = oom_score_adj
-
- if publish_all_ports:
- host_config['PublishAllPorts'] = publish_all_ports
-
- if read_only is not None:
- host_config['ReadonlyRootfs'] = read_only
-
- if dns_search:
- host_config['DnsSearch'] = dns_search
-
- if network_mode:
- host_config['NetworkMode'] = network_mode
- elif network_mode is None and compare_version('1.19', version) > 0:
- host_config['NetworkMode'] = 'default'
-
- if restart_policy:
- if not isinstance(restart_policy, dict):
- raise host_config_type_error(
- 'restart_policy', restart_policy, 'dict'
- )
-
- host_config['RestartPolicy'] = restart_policy
-
- if cap_add:
- host_config['CapAdd'] = cap_add
-
- if cap_drop:
- host_config['CapDrop'] = cap_drop
-
- if devices:
- host_config['Devices'] = parse_devices(devices)
-
- if group_add:
- if version_lt(version, '1.20'):
- raise host_config_version_error('group_add', '1.20')
-
- host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
-
- if dns is not None:
- host_config['Dns'] = dns
-
- if dns_opt is not None:
- if version_lt(version, '1.21'):
- raise host_config_version_error('dns_opt', '1.21')
-
- host_config['DnsOptions'] = dns_opt
-
- if security_opt is not None:
- if not isinstance(security_opt, list):
- raise host_config_type_error('security_opt', security_opt, 'list')
-
- host_config['SecurityOpt'] = security_opt
-
- if sysctls:
- if not isinstance(sysctls, dict):
- raise host_config_type_error('sysctls', sysctls, 'dict')
- host_config['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- host_config['Sysctls'][k] = six.text_type(v)
-
- if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
- volumes_from = volumes_from.split(',')
-
- host_config['VolumesFrom'] = volumes_from
-
- if binds is not None:
- host_config['Binds'] = convert_volume_binds(binds)
-
- if port_bindings is not None:
- host_config['PortBindings'] = convert_port_bindings(port_bindings)
-
- if extra_hosts is not None:
- if isinstance(extra_hosts, dict):
- extra_hosts = [
- '{0}:{1}'.format(k, v)
- for k, v in sorted(six.iteritems(extra_hosts))
- ]
-
- host_config['ExtraHosts'] = extra_hosts
-
- if links is not None:
- host_config['Links'] = normalize_links(links)
-
- if isinstance(lxc_conf, dict):
- formatted = []
- for k, v in six.iteritems(lxc_conf):
- formatted.append({'Key': k, 'Value': str(v)})
- lxc_conf = formatted
-
- if lxc_conf is not None:
- host_config['LxcConf'] = lxc_conf
-
- if cgroup_parent is not None:
- host_config['CgroupParent'] = cgroup_parent
-
- if ulimits is not None:
- if not isinstance(ulimits, list):
- raise host_config_type_error('ulimits', ulimits, 'list')
- host_config['Ulimits'] = []
- for l in ulimits:
- if not isinstance(l, Ulimit):
- l = Ulimit(**l)
- host_config['Ulimits'].append(l)
-
- if log_config is not None:
- if not isinstance(log_config, LogConfig):
- if not isinstance(log_config, dict):
- raise host_config_type_error(
- 'log_config', log_config, 'LogConfig'
- )
- log_config = LogConfig(**log_config)
-
- host_config['LogConfig'] = log_config
-
- if cpu_quota:
- if not isinstance(cpu_quota, int):
- raise host_config_type_error('cpu_quota', cpu_quota, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_quota', '1.19')
-
- host_config['CpuQuota'] = cpu_quota
-
- if cpu_period:
- if not isinstance(cpu_period, int):
- raise host_config_type_error('cpu_period', cpu_period, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_period', '1.19')
-
- host_config['CpuPeriod'] = cpu_period
-
- if cpu_shares:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpu_shares', '1.18')
-
- if not isinstance(cpu_shares, int):
- raise host_config_type_error('cpu_shares', cpu_shares, 'int')
-
- host_config['CpuShares'] = cpu_shares
-
- if cpuset_cpus:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpuset_cpus', '1.18')
-
- host_config['CpuSetCpus'] = cpuset_cpus
-
- if blkio_weight:
- if not isinstance(blkio_weight, int):
- raise host_config_type_error('blkio_weight', blkio_weight, 'int')
- if version_lt(version, '1.22'):
- raise host_config_version_error('blkio_weight', '1.22')
- host_config["BlkioWeight"] = blkio_weight
-
- if blkio_weight_device:
- if not isinstance(blkio_weight_device, list):
- raise host_config_type_error(
- 'blkio_weight_device', blkio_weight_device, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('blkio_weight_device', '1.22')
- host_config["BlkioWeightDevice"] = blkio_weight_device
-
- if device_read_bps:
- if not isinstance(device_read_bps, list):
- raise host_config_type_error(
- 'device_read_bps', device_read_bps, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_read_bps', '1.22')
- host_config["BlkioDeviceReadBps"] = device_read_bps
-
- if device_write_bps:
- if not isinstance(device_write_bps, list):
- raise host_config_type_error(
- 'device_write_bps', device_write_bps, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_write_bps', '1.22')
- host_config["BlkioDeviceWriteBps"] = device_write_bps
-
- if device_read_iops:
- if not isinstance(device_read_iops, list):
- raise host_config_type_error(
- 'device_read_iops', device_read_iops, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_read_iops', '1.22')
- host_config["BlkioDeviceReadIOps"] = device_read_iops
-
- if device_write_iops:
- if not isinstance(device_write_iops, list):
- raise host_config_type_error(
- 'device_write_iops', device_write_iops, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_write_iops', '1.22')
- host_config["BlkioDeviceWriteIOps"] = device_write_iops
-
- if tmpfs:
- if version_lt(version, '1.22'):
- raise host_config_version_error('tmpfs', '1.22')
- host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
-
- if userns_mode:
- if version_lt(version, '1.23'):
- raise host_config_version_error('userns_mode', '1.23')
-
- if userns_mode != "host":
- raise host_config_value_error("userns_mode", userns_mode)
- host_config['UsernsMode'] = userns_mode
-
- if pids_limit:
- if not isinstance(pids_limit, int):
- raise host_config_type_error('pids_limit', pids_limit, 'int')
- if version_lt(version, '1.23'):
- raise host_config_version_error('pids_limit', '1.23')
- host_config["PidsLimit"] = pids_limit
-
- return host_config
-
-
def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
@@ -907,50 +518,6 @@ def normalize_links(links):
return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
-def create_networking_config(endpoints_config=None):
- networking_config = {}
-
- if endpoints_config:
- networking_config["EndpointsConfig"] = endpoints_config
-
- return networking_config
-
-
-def create_endpoint_config(version, aliases=None, links=None,
- ipv4_address=None, ipv6_address=None,
- link_local_ips=None):
- if version_lt(version, '1.22'):
- raise errors.InvalidVersion(
- 'Endpoint config is not supported for API version < 1.22'
- )
- endpoint_config = {}
-
- if aliases:
- endpoint_config["Aliases"] = aliases
-
- if links:
- endpoint_config["Links"] = normalize_links(links)
-
- ipam_config = {}
- if ipv4_address:
- ipam_config['IPv4Address'] = ipv4_address
-
- if ipv6_address:
- ipam_config['IPv6Address'] = ipv6_address
-
- if link_local_ips is not None:
- if version_lt(version, '1.24'):
- raise errors.InvalidVersion(
- 'link_local_ips is not supported for API version < 1.24'
- )
- ipam_config['LinkLocalIPs'] = link_local_ips
-
- if ipam_config:
- endpoint_config['IPAMConfig'] = ipam_config
-
- return endpoint_config
-
-
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
@@ -964,7 +531,11 @@ def parse_env_file(env_file):
if line[0] == '#':
continue
- parse_line = line.strip().split('=', 1)
+ line = line.strip()
+ if not line:
+ continue
+
+ parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
@@ -993,147 +564,8 @@ def format_environment(environment):
return [format_env(*var) for var in six.iteritems(environment)]
-def create_container_config(
- version, image, command, hostname=None, user=None, detach=False,
- stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
- dns=None, volumes=None, volumes_from=None, network_disabled=False,
- entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
- labels=None, volume_driver=None, stop_signal=None, networking_config=None,
-):
- if isinstance(command, six.string_types):
- command = split_command(command)
-
- if isinstance(entrypoint, six.string_types):
- entrypoint = split_command(entrypoint)
-
- if isinstance(environment, dict):
- environment = format_environment(environment)
-
- if labels is not None and compare_version('1.18', version) < 0:
- raise errors.InvalidVersion(
- 'labels were only introduced in API version 1.18'
- )
-
- if cpuset is not None or cpu_shares is not None:
- if version_gte(version, '1.18'):
- warnings.warn(
- 'The cpuset_cpus and cpu_shares options have been moved to '
- 'host_config in API version 1.18, and will be removed',
- DeprecationWarning
- )
-
- if stop_signal is not None and compare_version('1.21', version) < 0:
- raise errors.InvalidVersion(
- 'stop_signal was only introduced in API version 1.21'
- )
-
- if compare_version('1.19', version) < 0:
- if volume_driver is not None:
- raise errors.InvalidVersion(
- 'Volume drivers were only introduced in API version 1.19'
- )
- mem_limit = mem_limit if mem_limit is not None else 0
- memswap_limit = memswap_limit if memswap_limit is not None else 0
- else:
- if mem_limit is not None:
- raise errors.InvalidVersion(
- 'mem_limit has been moved to host_config in API version 1.19'
- )
-
- if memswap_limit is not None:
- raise errors.InvalidVersion(
- 'memswap_limit has been moved to host_config in API '
- 'version 1.19'
- )
-
- if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
-
- if mem_limit is not None:
- mem_limit = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- memswap_limit = parse_bytes(memswap_limit)
-
- if isinstance(ports, list):
- exposed_ports = {}
- for port_definition in ports:
- port = port_definition
- proto = 'tcp'
- if isinstance(port_definition, tuple):
- if len(port_definition) == 2:
- proto = port_definition[1]
- port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
- ports = exposed_ports
-
- if isinstance(volumes, six.string_types):
- volumes = [volumes, ]
-
- if isinstance(volumes, list):
- volumes_dict = {}
- for vol in volumes:
- volumes_dict[vol] = {}
- volumes = volumes_dict
-
- if volumes_from:
- if not isinstance(volumes_from, six.string_types):
- volumes_from = ','.join(volumes_from)
- else:
- # Force None, an empty list or dict causes client.start to fail
- volumes_from = None
-
- attach_stdin = False
- attach_stdout = False
- attach_stderr = False
- stdin_once = False
-
- if not detach:
- attach_stdout = True
- attach_stderr = True
-
- if stdin_open:
- attach_stdin = True
- stdin_once = True
-
- if compare_version('1.10', version) >= 0:
- message = ('{0!r} parameter has no effect on create_container().'
- ' It has been moved to host_config')
- if dns is not None:
- raise errors.InvalidVersion(message.format('dns'))
- if volumes_from is not None:
- raise errors.InvalidVersion(message.format('volumes_from'))
-
- return {
- 'Hostname': hostname,
- 'Domainname': domainname,
- 'ExposedPorts': ports,
- 'User': six.text_type(user) if user else None,
- 'Tty': tty,
- 'OpenStdin': stdin_open,
- 'StdinOnce': stdin_once,
- 'Memory': mem_limit,
- 'AttachStdin': attach_stdin,
- 'AttachStdout': attach_stdout,
- 'AttachStderr': attach_stderr,
- 'Env': environment,
- 'Cmd': command,
- 'Dns': dns,
- 'Image': image,
- 'Volumes': volumes,
- 'VolumesFrom': volumes_from,
- 'NetworkDisabled': network_disabled,
- 'Entrypoint': entrypoint,
- 'CpuShares': cpu_shares,
- 'Cpuset': cpuset,
- 'CpusetCpus': cpuset,
- 'WorkingDir': working_dir,
- 'MemorySwap': memswap_limit,
- 'HostConfig': host_config,
- 'NetworkingConfig': networking_config,
- 'MacAddress': mac_address,
- 'Labels': labels,
- 'VolumeDriver': volume_driver,
- 'StopSignal': stop_signal
- }
+def create_host_config(self, *args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_host_config has been removed. Please use a '
+ 'docker.types.HostConfig object instead.'
+ )
diff --git a/docker/version.py b/docker/version.py
index 27d014c..af1bd5b 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.10.6"
+version = "2.4.2"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/docker_py.egg-info/PKG-INFO b/docker_py.egg-info/PKG-INFO
deleted file mode 100644
index cc96266..0000000
--- a/docker_py.egg-info/PKG-INFO
+++ /dev/null
@@ -1,61 +0,0 @@
-Metadata-Version: 1.1
-Name: docker-py
-Version: 1.10.6
-Summary: Python client for Docker.
-Home-page: https://github.com/docker/docker-py/
-Author: Joffrey F
-Author-email: joffrey@docker.com
-License: UNKNOWN
-Description: docker-py
- =========
-
- |Build Status|
-
- A Python library for the Docker Remote API. It does everything the
- ``docker`` command does, but from within Python – run containers, manage
- them, pull/push images, etc.
-
- Installation
- ------------
-
- The latest stable version is always available on PyPi.
-
- ::
-
- pip install docker-py
-
- Documentation
- -------------
-
- |Documentation Status|
-
- `Read the full documentation
- here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
- available in the ``docs/`` directory.
-
- License
- -------
-
- Docker is licensed under the Apache License, Version 2.0. See LICENSE
- for full license text
-
- .. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
- :target: https://travis-ci.org/docker/docker-py
- .. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
- :target: https://readthedocs.org/projects/docker-py/?badge=latest
-
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Other Environment
-Classifier: Intended Audience :: Developers
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Topic :: Utilities
-Classifier: License :: OSI Approved :: Apache Software License
diff --git a/docker_py.egg-info/SOURCES.txt b/docker_py.egg-info/SOURCES.txt
deleted file mode 100644
index 10cd950..0000000
--- a/docker_py.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-LICENSE
-MANIFEST.in
-README.md
-README.rst
-requirements.txt
-setup.cfg
-setup.py
-test-requirements.txt
-docker/__init__.py
-docker/client.py
-docker/constants.py
-docker/errors.py
-docker/tls.py
-docker/version.py
-docker/api/__init__.py
-docker/api/build.py
-docker/api/container.py
-docker/api/daemon.py
-docker/api/exec_api.py
-docker/api/image.py
-docker/api/network.py
-docker/api/service.py
-docker/api/swarm.py
-docker/api/volume.py
-docker/auth/__init__.py
-docker/auth/auth.py
-docker/ssladapter/__init__.py
-docker/ssladapter/ssladapter.py
-docker/transport/__init__.py
-docker/transport/npipeconn.py
-docker/transport/npipesocket.py
-docker/transport/unixconn.py
-docker/types/__init__.py
-docker/types/base.py
-docker/types/containers.py
-docker/types/services.py
-docker/types/swarm.py
-docker/utils/__init__.py
-docker/utils/decorators.py
-docker/utils/socket.py
-docker/utils/types.py
-docker/utils/utils.py
-docker/utils/ports/__init__.py
-docker/utils/ports/ports.py
-docker_py.egg-info/PKG-INFO
-docker_py.egg-info/SOURCES.txt
-docker_py.egg-info/dependency_links.txt
-docker_py.egg-info/not-zip-safe
-docker_py.egg-info/requires.txt
-docker_py.egg-info/top_level.txt
-tests/__init__.py
-tests/base.py
-tests/helpers.py
-tests/integration/__init__.py
-tests/integration/api_test.py
-tests/integration/build_test.py
-tests/integration/conftest.py
-tests/integration/container_test.py
-tests/integration/exec_test.py
-tests/integration/image_test.py
-tests/integration/network_test.py
-tests/integration/regression_test.py
-tests/integration/service_test.py
-tests/integration/swarm_test.py
-tests/integration/volume_test.py
-tests/unit/__init__.py
-tests/unit/api_test.py
-tests/unit/auth_test.py
-tests/unit/build_test.py
-tests/unit/client_test.py
-tests/unit/container_test.py
-tests/unit/exec_test.py
-tests/unit/fake_api.py
-tests/unit/fake_stat.py
-tests/unit/image_test.py
-tests/unit/network_test.py
-tests/unit/ssladapter_test.py
-tests/unit/utils_test.py
-tests/unit/volume_test.py
-tests/unit/testdata/certs/ca.pem
-tests/unit/testdata/certs/cert.pem
-tests/unit/testdata/certs/key.pem \ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index 19cf102..81bd901 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,6 +3,7 @@ universal = 1
[metadata]
description_file = README.rst
+license = Apache License 2.0
[egg_info]
tag_build =
diff --git a/setup.py b/setup.py
index 4538d91..31180d2 100644
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,26 @@
#!/usr/bin/env python
+from __future__ import print_function
+
+import codecs
import os
import sys
-from setuptools import setup
+import pip
+
+from setuptools import setup, find_packages
+if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:
+ print(
+ 'ERROR: "docker-py" needs to be uninstalled before installing this'
+ ' package:\npip uninstall docker-py', file=sys.stderr
+ )
+ sys.exit(1)
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2, != 2.11.0',
+ 'requests >= 2.5.2, != 2.11.0, != 2.12.2, != 2.18.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.2.1'
@@ -20,6 +31,9 @@ if sys.platform == 'win32':
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
+ # While not imported explicitly, the ipaddress module is required for
+ # ssl_match_hostname to verify hosts match with certificates via
+ # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
}
@@ -32,7 +46,7 @@ with open('./test-requirements.txt') as test_reqs_txt:
long_description = ''
try:
- with open('./README.rst') as readme_rst:
+ with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
@@ -40,29 +54,24 @@ except IOError:
pass
setup(
- name="docker-py",
+ name="docker",
version=version,
- description="Python client for Docker.",
+ description="A Python library for the Docker Engine API.",
long_description=long_description,
- url='https://github.com/docker/docker-py/',
- packages=[
- 'docker', 'docker.api', 'docker.auth', 'docker.transport',
- 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
- 'docker.types',
- ],
+ url='https://github.com/docker/docker-py',
+ packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
- 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
diff --git a/tests/base.py b/tests/base.py
deleted file mode 100644
index a2c01fc..0000000
--- a/tests/base.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import sys
-import unittest
-
-import pytest
-import six
-
-import docker
-
-
-class BaseTestCase(unittest.TestCase):
- def assertIn(self, object, collection):
- if six.PY2 and sys.version_info[1] <= 6:
- return self.assertTrue(object in collection)
- return super(BaseTestCase, self).assertIn(object, collection)
-
-
-def requires_api_version(version):
- return pytest.mark.skipif(
- docker.utils.version_lt(
- docker.constants.DEFAULT_DOCKER_API_VERSION, version
- ),
- reason="API version is too low (< {0})".format(version)
- )
-
-
-class Cleanup(object):
- if sys.version_info < (2, 7):
- # Provide a basic implementation of addCleanup for Python < 2.7
- def __init__(self, *args, **kwargs):
- super(Cleanup, self).__init__(*args, **kwargs)
- self._cleanups = []
-
- def tearDown(self):
- super(Cleanup, self).tearDown()
- ok = True
- while self._cleanups:
- fn, args, kwargs = self._cleanups.pop(-1)
- try:
- fn(*args, **kwargs)
- except KeyboardInterrupt:
- raise
- except:
- ok = False
- if not ok:
- raise
-
- def addCleanup(self, function, *args, **kwargs):
- self._cleanups.append((function, args, kwargs))
diff --git a/tests/helpers.py b/tests/helpers.py
index 40baef9..124ae2d 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,15 +1,13 @@
+import functools
import os
import os.path
-import shutil
+import random
import tarfile
import tempfile
-import unittest
+import time
import docker
-import six
-
-BUSYBOX = 'busybox:buildroot-2014.02'
-EXEC_DRIVER = []
+import pytest
def make_tree(dirs, files):
@@ -45,86 +43,62 @@ def untar_file(tardata, filename):
return result
-def docker_client(**kwargs):
- return docker.Client(**docker_client_kwargs(**kwargs))
-
-
-def docker_client_kwargs(**kwargs):
- client_kwargs = docker.utils.kwargs_from_env(assert_hostname=False)
- client_kwargs.update(kwargs)
- return client_kwargs
-
-
-class BaseTestCase(unittest.TestCase):
- tmp_imgs = []
- tmp_containers = []
- tmp_folders = []
- tmp_volumes = []
-
- def setUp(self):
- if six.PY2:
- self.assertRegex = self.assertRegexpMatches
- self.assertCountEqual = self.assertItemsEqual
- self.client = docker_client(timeout=60)
- self.tmp_imgs = []
- self.tmp_containers = []
- self.tmp_folders = []
- self.tmp_volumes = []
- self.tmp_networks = []
-
- def tearDown(self):
- for img in self.tmp_imgs:
- try:
- self.client.remove_image(img)
- except docker.errors.APIError:
- pass
- for container in self.tmp_containers:
- try:
- self.client.stop(container, timeout=1)
- self.client.remove_container(container)
- except docker.errors.APIError:
- pass
- for network in self.tmp_networks:
- try:
- self.client.remove_network(network)
- except docker.errors.APIError:
- pass
- for folder in self.tmp_folders:
- shutil.rmtree(folder)
-
- for volume in self.tmp_volumes:
- try:
- self.client.remove_volume(volume)
- except docker.errors.APIError:
- pass
-
- self.client.close()
-
- def run_container(self, *args, **kwargs):
- container = self.client.create_container(*args, **kwargs)
- self.tmp_containers.append(container)
- self.client.start(container)
- exitcode = self.client.wait(container)
-
- if exitcode != 0:
- output = self.client.logs(container)
- raise Exception(
- "Container exited with code {}:\n{}"
- .format(exitcode, output))
-
- return container
-
- def create_and_start(self, image='busybox', command='top', **kwargs):
- container = self.client.create_container(
- image=image, command=command, **kwargs)
- self.tmp_containers.append(container)
- self.client.start(container)
- return container
-
- def execute(self, container, cmd, exit_code=0, **kwargs):
- exc = self.client.exec_create(container, cmd, **kwargs)
- output = self.client.exec_start(exc)
- actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
- msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
- " ".join(cmd), exit_code, actual_exit_code, output)
- assert actual_exit_code == exit_code, msg
+def requires_api_version(version):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
+ return pytest.mark.skipif(
+ docker.utils.version_lt(test_version, version),
+ reason="API version is too low (< {0})".format(version)
+ )
+
+
+def requires_experimental(until=None):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
+ def req_exp(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if not self.client.info()['ExperimentalBuild']:
+ pytest.skip('Feature requires Docker Engine experimental mode')
+ return f(self, *args, **kwargs)
+
+ if until and docker.utils.version_gte(test_version, until):
+ return f
+ return wrapped
+
+ return req_exp
+
+
+def wait_on_condition(condition, delay=0.1, timeout=40):
+ start_time = time.time()
+ while not condition():
+ if time.time() - start_time > timeout:
+ raise AssertionError("Timeout: %s" % condition)
+ time.sleep(delay)
+
+
+def random_name():
+ return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+
+def force_leave_swarm(client):
+ """Actually force leave a Swarm. There seems to be a bug in Swarm that
+ occasionally throws "context deadline exceeded" errors when leaving."""
+ while True:
+ try:
+ if isinstance(client, docker.DockerClient):
+ return client.swarm.leave(force=True)
+ return client.leave_swarm(force=True) # elif APIClient
+ except docker.errors.APIError as e:
+ if e.explanation == "context deadline exceeded":
+ continue
+ else:
+ return
+
+
+def swarm_listen_addr():
+ return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
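As a sketch of how the new polling helper behaves (re-declared here so the example is self-contained, since tests/helpers.py is not an installed module):

    import time

    def wait_on_condition(condition, delay=0.1, timeout=40):
        # Same logic as the helper added above
        start_time = time.time()
        while not condition():
            if time.time() - start_time > timeout:
                raise AssertionError("Timeout: %s" % condition)
            time.sleep(delay)

    deadline = time.time() + 0.5
    wait_on_condition(lambda: time.time() > deadline)  # returns after ~0.5s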
diff --git a/tests/integration/build_test.py b/tests/integration/api_build_test.py
index c7a5fbe..609964f 100644
--- a/tests/integration/build_test.py
+++ b/tests/integration/api_build_test.py
@@ -3,19 +3,19 @@ import os
import shutil
import tempfile
-import six
-
from docker import errors
-from .. import helpers
-from ..base import requires_api_version
+import pytest
+import six
+from .base import BaseAPIIntegrationTest
+from ..helpers import requires_api_version
-class BuildTest(helpers.BaseTestCase):
+
+class BuildTest(BaseAPIIntegrationTest):
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -32,7 +32,6 @@ class BuildTest(helpers.BaseTestCase):
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -54,7 +53,6 @@ class BuildTest(helpers.BaseTestCase):
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
- 'MAINTAINER docker-py',
'ADD . /test',
]))
@@ -118,6 +116,134 @@ class BuildTest(helpers.BaseTestCase):
info = self.client.inspect_image('buildargs')
self.assertEqual(info['Config']['User'], 'OK')
+ @requires_api_version('1.22')
+ def test_build_shmsize(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ 'CMD sh -c "echo \'Hello, World!\'"',
+ ]).encode('ascii'))
+
+ tag = 'shmsize'
+ shmsize = 134217728
+
+ stream = self.client.build(
+ fileobj=script, tag=tag, shmsize=shmsize
+ )
+ self.tmp_imgs.append(tag)
+ for chunk in stream:
+ pass
+
+ # There is currently no way to get the shmsize
+ # that was used to build the image
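+        # (A possible indirect check, not attempted here: add a
+        # `RUN df /dev/shm` step to the Dockerfile and scan the build output
+        # for the expected size, since shmsize sets the size of /dev/shm in
+        # the build containers.)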
+
+ @requires_api_version('1.23')
+ def test_build_labels(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM scratch',
+ ]).encode('ascii'))
+
+ labels = {'test': 'OK'}
+
+ stream = self.client.build(
+ fileobj=script, tag='labels', labels=labels
+ )
+ self.tmp_imgs.append('labels')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('labels')
+ self.assertEqual(info['Config']['Labels'], labels)
+
+ @requires_api_version('1.25')
+ def test_build_with_cache_from(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'ENV FOO=bar',
+ 'RUN touch baz',
+ 'RUN touch bax',
+ ]).encode('ascii'))
+
+ stream = self.client.build(fileobj=script, tag='build1')
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['build1'],
+ decode=True
+ )
+ self.tmp_imgs.append('build2')
+ counter = 0
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 3
+ self.client.remove_image('build2')
+
+ counter = 0
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['nosuchtag'],
+ decode=True
+ )
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 0
+
+ @requires_api_version('1.29')
+ def test_build_container_with_target(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox as first',
+ 'RUN mkdir -p /tmp/test',
+ 'RUN touch /tmp/silence.tar.gz',
+ 'FROM alpine:latest',
+            'WORKDIR /root/',
+ 'COPY --from=first /tmp/silence.tar.gz .',
+ 'ONBUILD RUN echo "This should not be in the final image"'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, target='first', tag='build1'
+ )
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ info = self.client.inspect_image('build1')
+ self.assertEqual(info['Config']['OnBuild'], [])
+
+ @requires_api_version('1.25')
+ def test_build_with_network_mode(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'RUN wget http://google.com'
+ ]).encode('ascii'))
+
+ stream = self.client.build(
+ fileobj=script, network_mode='bridge',
+ tag='dockerpytest_bridgebuild'
+ )
+
+ self.tmp_imgs.append('dockerpytest_bridgebuild')
+ for chunk in stream:
+ pass
+
+ assert self.client.inspect_image('dockerpytest_bridgebuild')
+
+ script.seek(0)
+ stream = self.client.build(
+ fileobj=script, network_mode='none',
+ tag='dockerpytest_nonebuild', nocache=True, decode=True
+ )
+
+ self.tmp_imgs.append('dockerpytest_nonebuild')
+ logs = [chunk for chunk in stream]
+ assert 'errorDetail' in logs[-1]
+ assert logs[-1]['errorDetail']['code'] == 1
+
+ with pytest.raises(errors.NotFound):
+ self.client.inspect_image('dockerpytest_nonebuild')
+
def test_build_stderr_data(self):
control_chars = ['\x1b[91m', '\x1b[0m']
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
@@ -144,7 +270,6 @@ class BuildTest(helpers.BaseTestCase):
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
- 'MAINTAINER docker-py',
'ADD . /test',
]))
diff --git a/tests/integration/api_test.py b/tests/integration/api_client_test.py
index 67ed068..cc64158 100644
--- a/tests/integration/api_test.py
+++ b/tests/integration/api_client_test.py
@@ -6,11 +6,12 @@ import unittest
import warnings
import docker
+from docker.utils import kwargs_from_env
-from .. import helpers
+from .base import BaseAPIIntegrationTest
-class InformationTest(helpers.BaseTestCase):
+class InformationTest(BaseAPIIntegrationTest):
def test_version(self):
res = self.client.version()
self.assertIn('GoVersion', res)
@@ -23,58 +24,8 @@ class InformationTest(helpers.BaseTestCase):
self.assertIn('Images', res)
self.assertIn('Debug', res)
- def test_search(self):
- self.client = helpers.docker_client(timeout=10)
- res = self.client.search('busybox')
- self.assertTrue(len(res) >= 1)
- base_img = [x for x in res if x['name'] == 'busybox']
- self.assertEqual(len(base_img), 1)
- self.assertIn('description', base_img[0])
-
-class LinkTest(helpers.BaseTestCase):
- def test_remove_link(self):
- # Create containers
- container1 = self.client.create_container(
- helpers.BUSYBOX, 'cat', detach=True, stdin_open=True
- )
- container1_id = container1['Id']
- self.tmp_containers.append(container1_id)
- self.client.start(container1_id)
-
- # Create Link
- # we don't want the first /
- link_path = self.client.inspect_container(container1_id)['Name'][1:]
- link_alias = 'mylink'
-
- container2 = self.client.create_container(
- helpers.BUSYBOX, 'cat', host_config=self.client.create_host_config(
- links={link_path: link_alias}
- )
- )
- container2_id = container2['Id']
- self.tmp_containers.append(container2_id)
- self.client.start(container2_id)
-
- # Remove link
- linked_name = self.client.inspect_container(container2_id)['Name'][1:]
- link_name = '%s/%s' % (linked_name, link_alias)
- self.client.remove_container(link_name, link=True)
-
- # Link is gone
- containers = self.client.containers(all=True)
- retrieved = [x for x in containers if link_name in x['Names']]
- self.assertEqual(len(retrieved), 0)
-
- # Containers are still there
- retrieved = [
- x for x in containers if x['Id'].startswith(container1_id) or
- x['Id'].startswith(container2_id)
- ]
- self.assertEqual(len(retrieved), 2)
-
-
-class LoadConfigTest(helpers.BaseTestCase):
+class LoadConfigTest(BaseAPIIntegrationTest):
def test_load_legacy_config(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
@@ -113,7 +64,7 @@ class LoadConfigTest(helpers.BaseTestCase):
class AutoDetectVersionTest(unittest.TestCase):
def test_client_init(self):
- client = helpers.docker_client(version='auto')
+ client = docker.APIClient(version='auto', **kwargs_from_env())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
@@ -121,25 +72,15 @@ class AutoDetectVersionTest(unittest.TestCase):
self.assertEqual(client_version, api_version_2)
client.close()
- def test_auto_client(self):
- client = docker.AutoVersionClient(**helpers.docker_client_kwargs())
- client_version = client._version
- api_version = client.version(api_version=False)['ApiVersion']
- self.assertEqual(client_version, api_version)
- api_version_2 = client.version()['ApiVersion']
- self.assertEqual(client_version, api_version_2)
- client.close()
- with self.assertRaises(docker.errors.DockerException):
- docker.AutoVersionClient(
- **helpers.docker_client_kwargs(version='1.11')
- )
-
class ConnectionTimeoutTest(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
- self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
- timeout=self.timeout)
+ self.client = docker.api.APIClient(
+ version=docker.constants.MINIMUM_DOCKER_API_VERSION,
+ base_url='http://192.168.10.2:4243',
+ timeout=self.timeout
+ )
def test_timeout(self):
start = time.time()
@@ -167,7 +108,7 @@ class UnixconnTest(unittest.TestCase):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- client = helpers.docker_client()
+ client = docker.APIClient(version='auto', **kwargs_from_env())
client.images()
client.close()
del client
diff --git a/tests/integration/container_test.py b/tests/integration/api_container_test.py
index e390acb..f8b474a 100644
--- a/tests/integration/container_test.py
+++ b/tests/integration/api_container_test.py
@@ -6,16 +6,17 @@ import docker
from docker.constants import IS_WINDOWS_PLATFORM
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
+
import pytest
+
import six
-from ..base import requires_api_version
+from .base import BUSYBOX, BaseAPIIntegrationTest
from .. import helpers
-
-BUSYBOX = helpers.BUSYBOX
+from ..helpers import requires_api_version
-class ListContainersTest(helpers.BaseTestCase):
+class ListContainersTest(BaseAPIIntegrationTest):
def test_list_containers(self):
res0 = self.client.containers(all=True)
size = len(res0)
@@ -35,7 +36,7 @@ class ListContainersTest(helpers.BaseTestCase):
self.assertIn('Status', retrieved)
-class CreateContainerTest(helpers.BaseTestCase):
+class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
res = self.client.create_container(BUSYBOX, 'true')
@@ -119,9 +120,9 @@ class CreateContainerTest(helpers.BaseTestCase):
self.client.wait(id)
with self.assertRaises(docker.errors.APIError) as exc:
self.client.remove_container(id)
- err = exc.exception.response.text
+ err = exc.exception.explanation
self.assertIn(
- 'You cannot remove a running container', err
+ 'You cannot remove ', err
)
self.client.remove_container(id, force=True)
@@ -256,7 +257,7 @@ class CreateContainerTest(helpers.BaseTestCase):
self.assertIn('1001', groups)
def test_valid_log_driver_and_log_opt(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type='json-file',
config={'max-file': '100'}
)
@@ -275,7 +276,7 @@ class CreateContainerTest(helpers.BaseTestCase):
self.assertEqual(container_log_config['Config'], log_config.config)
def test_invalid_log_driver_raises_exception(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type='asdf-nope',
config={}
)
@@ -290,10 +291,10 @@ class CreateContainerTest(helpers.BaseTestCase):
)
self.client.start(container)
- assert six.b(expected_msg) in excinfo.value.explanation
+ assert excinfo.value.explanation == expected_msg
def test_valid_no_log_driver_specified(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type="",
config={'max-file': '100'}
)
@@ -312,7 +313,7 @@ class CreateContainerTest(helpers.BaseTestCase):
self.assertEqual(container_log_config['Config'], log_config.config)
def test_valid_no_config_specified(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type="json-file",
config=None
)
@@ -362,13 +363,6 @@ class CreateContainerTest(helpers.BaseTestCase):
host_config = inspect['HostConfig']
self.assertIn('MemorySwappiness', host_config)
- def test_create_host_config_exception_raising(self):
- self.assertRaises(TypeError,
- self.client.create_host_config, mem_swappiness='40')
-
- self.assertRaises(ValueError,
- self.client.create_host_config, pid_mode='40')
-
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
BUSYBOX,
@@ -398,8 +392,80 @@ class CreateContainerTest(helpers.BaseTestCase):
config = self.client.inspect_container(container)
assert config['HostConfig']['Tmpfs'] == tmpfs
+ @requires_api_version('1.24')
+ def test_create_with_isolation(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo'], host_config=self.client.create_host_config(
+ isolation='default'
+ )
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Isolation'] == 'default'
+
+ @requires_api_version('1.25')
+ def test_create_with_auto_remove(self):
+ host_config = self.client.create_host_config(
+ auto_remove=True
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['AutoRemove'] is True
+
+ @requires_api_version('1.25')
+ def test_create_with_stop_timeout(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], stop_timeout=25
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['Config']['StopTimeout'] == 25
+
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ def test_create_with_storage_opt(self):
+ host_config = self.client.create_host_config(
+ storage_opt={'size': '120G'}
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container)
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['StorageOpt'] == {
+ 'size': '120G'
+ }
+
+ @requires_api_version('1.25')
+ def test_create_with_init(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ init=True
+ )
+ )
+ self.tmp_containers.append(ctnr['Id'])
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['Init'] is True
+
+ @pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
+ @requires_api_version('1.25')
+ def test_create_with_init_path(self):
+ ctnr = self.client.create_container(
+ BUSYBOX, 'true',
+ host_config=self.client.create_host_config(
+ init_path="/usr/libexec/docker-init"
+ )
+ )
+ self.tmp_containers.append(ctnr['Id'])
+ config = self.client.inspect_container(ctnr)
+ assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
-class VolumeBindTest(helpers.BaseTestCase):
+
+class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
super(VolumeBindTest, self).setUp()
@@ -494,7 +560,7 @@ class VolumeBindTest(helpers.BaseTestCase):
@requires_api_version('1.20')
-class ArchiveTest(helpers.BaseTestCase):
+class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
@@ -574,7 +640,7 @@ class ArchiveTest(helpers.BaseTestCase):
self.assertIn('bar/', results)
-class RenameContainerTest(helpers.BaseTestCase):
+class RenameContainerTest(BaseAPIIntegrationTest):
def test_rename_container(self):
version = self.client.version()['Version']
name = 'hong_meiling'
@@ -590,7 +656,7 @@ class RenameContainerTest(helpers.BaseTestCase):
self.assertEqual('/{0}'.format(name), inspect['Name'])
-class StartContainerTest(helpers.BaseTestCase):
+class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
@@ -644,7 +710,7 @@ class StartContainerTest(helpers.BaseTestCase):
self.assertEqual(exitcode, 0, msg=cmd)
-class WaitTest(helpers.BaseTestCase):
+class WaitTest(BaseAPIIntegrationTest):
def test_wait(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
@@ -672,7 +738,7 @@ class WaitTest(helpers.BaseTestCase):
self.assertEqual(inspect['State']['ExitCode'], exitcode)
-class LogsTest(helpers.BaseTestCase):
+class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
@@ -744,7 +810,7 @@ Line2'''
self.assertEqual(logs, ''.encode(encoding='ascii'))
-class DiffTest(helpers.BaseTestCase):
+class DiffTest(BaseAPIIntegrationTest):
def test_diff(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
@@ -772,7 +838,7 @@ class DiffTest(helpers.BaseTestCase):
self.assertEqual(test_diff[0]['Kind'], 1)
-class StopTest(helpers.BaseTestCase):
+class StopTest(BaseAPIIntegrationTest):
def test_stop(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -799,7 +865,7 @@ class StopTest(helpers.BaseTestCase):
self.assertEqual(state['Running'], False)
-class KillTest(helpers.BaseTestCase):
+class KillTest(BaseAPIIntegrationTest):
def test_kill(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -876,7 +942,7 @@ class KillTest(helpers.BaseTestCase):
self.assertEqual(state['Running'], False, state)
-class PortTest(helpers.BaseTestCase):
+class PortTest(BaseAPIIntegrationTest):
def test_port(self):
port_bindings = {
@@ -907,7 +973,7 @@ class PortTest(helpers.BaseTestCase):
self.client.kill(id)
-class ContainerTopTest(helpers.BaseTestCase):
+class ContainerTopTest(BaseAPIIntegrationTest):
def test_top(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60']
@@ -947,7 +1013,7 @@ class ContainerTopTest(helpers.BaseTestCase):
self.assertEqual(res['Processes'][0][10], 'sleep 60')
-class RestartContainerTest(helpers.BaseTestCase):
+class RestartContainerTest(BaseAPIIntegrationTest):
def test_restart(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -988,7 +1054,7 @@ class RestartContainerTest(helpers.BaseTestCase):
self.client.kill(id)
-class RemoveContainerTest(helpers.BaseTestCase):
+class RemoveContainerTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
@@ -1010,7 +1076,7 @@ class RemoveContainerTest(helpers.BaseTestCase):
self.assertEqual(len(res), 0)
-class AttachContainerTest(helpers.BaseTestCase):
+class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
container = self.client.create_container(BUSYBOX, '/bin/sh',
detach=True, stdin_open=True)
@@ -1041,7 +1107,7 @@ class AttachContainerTest(helpers.BaseTestCase):
self.assertEqual(data.decode('utf-8'), line)
-class PauseTest(helpers.BaseTestCase):
+class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -1070,7 +1136,23 @@ class PauseTest(helpers.BaseTestCase):
self.assertEqual(state['Paused'], False)
-class GetContainerStatsTest(helpers.BaseTestCase):
+class PruneTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.25')
+ def test_prune_containers(self):
+ container1 = self.client.create_container(
+ BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
+ )
+ container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.client.start(container1)
+ self.client.start(container2)
+ self.client.wait(container1)
+ result = self.client.prune_containers()
+ assert container1['Id'] in result['ContainersDeleted']
+ assert result['SpaceReclaimed'] > 0
+ assert container2['Id'] not in result['ContainersDeleted']
+
+
+class GetContainerStatsTest(BaseAPIIntegrationTest):
@requires_api_version('1.19')
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
@@ -1101,7 +1183,7 @@ class GetContainerStatsTest(helpers.BaseTestCase):
self.assertIn(key, chunk)
-class ContainerUpdateTest(helpers.BaseTestCase):
+class ContainerUpdateTest(BaseAPIIntegrationTest):
@requires_api_version('1.22')
def test_update_container(self):
old_mem_limit = 400 * 1024 * 1024
@@ -1117,8 +1199,38 @@ class ContainerUpdateTest(helpers.BaseTestCase):
inspect_data = self.client.inspect_container(container)
self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)
+ @requires_api_version('1.23')
+ def test_restart_policy_update(self):
+ old_restart_policy = {
+ 'MaximumRetryCount': 0,
+ 'Name': 'always'
+ }
+ new_restart_policy = {
+ 'MaximumRetryCount': 42,
+ 'Name': 'on-failure'
+ }
+ container = self.client.create_container(
+ BUSYBOX, ['sleep', '60'],
+ host_config=self.client.create_host_config(
+ restart_policy=old_restart_policy
+ )
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ self.client.update_container(container,
+ restart_policy=new_restart_policy)
+ inspect_data = self.client.inspect_container(container)
+ self.assertEqual(
+ inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'],
+ new_restart_policy['MaximumRetryCount']
+ )
+ self.assertEqual(
+ inspect_data['HostConfig']['RestartPolicy']['Name'],
+ new_restart_policy['Name']
+ )
+
-class ContainerCPUTest(helpers.BaseTestCase):
+class ContainerCPUTest(BaseAPIIntegrationTest):
@requires_api_version('1.18')
def test_container_cpu_shares(self):
cpu_shares = 512
@@ -1144,3 +1256,54 @@ class ContainerCPUTest(helpers.BaseTestCase):
self.client.start(container)
inspect_data = self.client.inspect_container(container)
self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)
+
+ @requires_api_version('1.25')
+ def test_create_with_runtime(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], runtime='runc'
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Runtime'] == 'runc'
+
+
+class LinkTest(BaseAPIIntegrationTest):
+ def test_remove_link(self):
+ # Create containers
+ container1 = self.client.create_container(
+ BUSYBOX, 'cat', detach=True, stdin_open=True
+ )
+ container1_id = container1['Id']
+ self.tmp_containers.append(container1_id)
+ self.client.start(container1_id)
+
+ # Create Link
+ # we don't want the first /
+ link_path = self.client.inspect_container(container1_id)['Name'][1:]
+ link_alias = 'mylink'
+
+ container2 = self.client.create_container(
+ BUSYBOX, 'cat', host_config=self.client.create_host_config(
+ links={link_path: link_alias}
+ )
+ )
+ container2_id = container2['Id']
+ self.tmp_containers.append(container2_id)
+ self.client.start(container2_id)
+
+ # Remove link
+ linked_name = self.client.inspect_container(container2_id)['Name'][1:]
+ link_name = '%s/%s' % (linked_name, link_alias)
+ self.client.remove_container(link_name, link=True)
+
+ # Link is gone
+ containers = self.client.containers(all=True)
+ retrieved = [x for x in containers if link_name in x['Names']]
+ self.assertEqual(len(retrieved), 0)
+
+ # Containers are still there
+ retrieved = [
+ x for x in containers if x['Id'].startswith(container1_id) or
+ x['Id'].startswith(container2_id)
+ ]
+ self.assertEqual(len(retrieved), 2)
diff --git a/tests/integration/exec_test.py b/tests/integration/api_exec_test.py
index f377e09..7a65041 100644
--- a/tests/integration/exec_test.py
+++ b/tests/integration/api_exec_test.py
@@ -1,12 +1,11 @@
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
-from .. import helpers
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import requires_api_version
-BUSYBOX = helpers.BUSYBOX
-
-class ExecTest(helpers.BaseTestCase):
+class ExecTest(BaseAPIIntegrationTest):
def test_execute_command(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
@@ -95,6 +94,21 @@ class ExecTest(helpers.BaseTestCase):
data = read_exactly(socket, next_size)
self.assertEqual(data.decode('utf-8'), line)
+ def test_exec_start_detached(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ container_id = container['Id']
+ self.client.start(container_id)
+ self.tmp_containers.append(container_id)
+
+ exec_id = self.client.exec_create(
+ container_id, ['printf', "asdqwe"])
+ self.assertIn('Id', exec_id)
+
+ response = self.client.exec_start(exec_id, detach=True)
+
+ self.assertEqual(response, "")
+
def test_exec_inspect(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
@@ -108,3 +122,17 @@ class ExecTest(helpers.BaseTestCase):
exec_info = self.client.exec_inspect(exec_id)
self.assertIn('ExitCode', exec_info)
self.assertNotEqual(exec_info['ExitCode'], 0)
+
+ @requires_api_version('1.25')
+ def test_exec_command_with_env(self):
+ container = self.client.create_container(BUSYBOX, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'env', environment=["X=Y"])
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert b'X=Y\n' in exec_log
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
new file mode 100644
index 0000000..211042d
--- /dev/null
+++ b/tests/integration/api_healthcheck_test.py
@@ -0,0 +1,67 @@
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from .. import helpers
+
+SECOND = 1000000000  # one second, in nanoseconds (healthcheck durations use ns)
+
+
+def wait_on_health_status(client, container, status):
+ def condition():
+ res = client.inspect_container(container)
+ return res['State']['Health']['Status'] == status
+ return helpers.wait_on_condition(condition)
+
+
+class HealthcheckTest(BaseAPIIntegrationTest):
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_shell_command(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
+ self.tmp_containers.append(container)
+
+ res = self.client.inspect_container(container)
+ assert res['Config']['Healthcheck']['Test'] == \
+ ['CMD-SHELL', 'echo "hello world"']
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_passes(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="true",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_fails(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="false",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "unhealthy")
+
+ @helpers.requires_api_version('1.29')
+ def test_healthcheck_start_period(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="echo 'x' >> /counter.txt && "
+ "test `cat /counter.txt | wc -l` -ge 3",
+ interval=1 * SECOND,
+ timeout=1 * SECOND,
+ retries=1,
+ start_period=3 * SECOND
+ )
+ )
+
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
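+
+# Note on the start_period test above: probe failures that occur during the
+# start period (API 1.29+) are not counted toward `retries`, which is why
+# the counter-based healthcheck can fail its first two probes and still
+# converge on "healthy".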
diff --git a/tests/integration/image_test.py b/tests/integration/api_image_test.py
index 24800f2..917bc50 100644
--- a/tests/integration/image_test.py
+++ b/tests/integration/api_image_test.py
@@ -14,12 +14,11 @@ from six.moves import socketserver
import docker
-from .. import helpers
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest, BUSYBOX
-BUSYBOX = helpers.BUSYBOX
-
-class ListImagesTest(helpers.BaseTestCase):
+class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
@@ -37,13 +36,13 @@ class ListImagesTest(helpers.BaseTestCase):
self.assertEqual(type(res1[0]), six.text_type)
-class PullImageTest(helpers.BaseTestCase):
+class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
- res = self.client.pull('hello-world')
+ res = self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
@@ -57,7 +56,8 @@ class PullImageTest(helpers.BaseTestCase):
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
- stream = self.client.pull('hello-world', stream=True, decode=True)
+ stream = self.client.pull(
+ 'hello-world', tag='latest', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
@@ -68,7 +68,7 @@ class PullImageTest(helpers.BaseTestCase):
self.assertIn('Id', img_info)
-class CommitTest(helpers.BaseTestCase):
+class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
@@ -103,7 +103,7 @@ class CommitTest(helpers.BaseTestCase):
assert img['Config']['Cmd'] == ['bash']
-class RemoveImageTest(helpers.BaseTestCase):
+class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
@@ -119,7 +119,7 @@ class RemoveImageTest(helpers.BaseTestCase):
self.assertEqual(len(res), 0)
-class ImportImageTest(helpers.BaseTestCase):
+class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
@@ -287,3 +287,32 @@ class ImportImageTest(helpers.BaseTestCase):
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
+
+
+@requires_api_version('1.25')
+class PruneImagesTest(BaseAPIIntegrationTest):
+ def test_prune_images(self):
+ try:
+ self.client.remove_image('hello-world')
+ except docker.errors.APIError:
+ pass
+
+ # Ensure busybox does not get pruned
+ ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.tmp_containers.append(ctnr)
+
+ self.client.pull('hello-world', tag='latest')
+ self.tmp_imgs.append('hello-world')
+ img_id = self.client.inspect_image('hello-world')['Id']
+ result = self.client.prune_images()
+ assert img_id not in [
+ img.get('Deleted') for img in result['ImagesDeleted']
+ ]
+ result = self.client.prune_images({'dangling': False})
+ assert result['SpaceReclaimed'] > 0
+ assert 'hello-world:latest' in [
+ img.get('Untagged') for img in result['ImagesDeleted']
+ ]
+ assert img_id in [
+ img.get('Deleted') for img in result['ImagesDeleted']
+ ]
diff --git a/tests/integration/network_test.py b/tests/integration/api_network_test.py
index 5f852ab..5439dd7 100644
--- a/tests/integration/network_test.py
+++ b/tests/integration/api_network_test.py
@@ -1,17 +1,18 @@
-import random
-
import docker
-from docker.utils import create_ipam_config
-from docker.utils import create_ipam_pool
+from docker.types import IPAMConfig, IPAMPool
import pytest
-from .. import helpers
-from ..base import requires_api_version
+from ..helpers import random_name, requires_api_version
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+class TestNetworks(BaseAPIIntegrationTest):
+ def tearDown(self):
+ super(TestNetworks, self).tearDown()
+ self.client.leave_swarm(force=True)
-class TestNetworks(helpers.BaseTestCase):
def create_network(self, *args, **kwargs):
- net_name = u'dockerpy{}'.format(random.getrandbits(24))[:14]
+ net_name = random_name()
net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
self.tmp_networks.append(net_id)
return (net_name, net_id)
@@ -19,12 +20,10 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.21')
def test_list_networks(self):
networks = self.client.networks()
- initial_size = len(networks)
net_name, net_id = self.create_network()
networks = self.client.networks()
- self.assertEqual(len(networks), initial_size + 1)
self.assertTrue(net_id in [n['Id'] for n in networks])
networks_by_name = self.client.networks(names=[net_name])
@@ -47,10 +46,10 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.21')
def test_create_network_with_ipam_config(self):
_, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="172.28.0.0/16",
iprange="172.28.5.0/24",
gateway="172.28.5.254",
@@ -84,26 +83,22 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.21')
def test_create_network_with_host_driver_fails(self):
- net_name = 'dockerpy{}'.format(random.getrandbits(24))[:14]
-
with pytest.raises(docker.errors.APIError):
- self.client.create_network(net_name, driver='host')
+ self.client.create_network(random_name(), driver='host')
@requires_api_version('1.21')
def test_remove_network(self):
- initial_size = len(self.client.networks())
-
net_name, net_id = self.create_network()
- self.assertEqual(len(self.client.networks()), initial_size + 1)
+ assert net_name in [n['Name'] for n in self.client.networks()]
self.client.remove_network(net_id)
- self.assertEqual(len(self.client.networks()), initial_size)
+ assert net_name not in [n['Name'] for n in self.client.networks()]
@requires_api_version('1.21')
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -131,7 +126,7 @@ class TestNetworks(helpers.BaseTestCase):
def test_connect_and_force_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -158,7 +153,7 @@ class TestNetworks(helpers.BaseTestCase):
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -176,7 +171,7 @@ class TestNetworks(helpers.BaseTestCase):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image='busybox',
+ image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
@@ -197,7 +192,7 @@ class TestNetworks(helpers.BaseTestCase):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image='busybox',
+ image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(
network_mode=net_name,
@@ -221,13 +216,13 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.22')
def test_create_with_ipv4_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
- pool_configs=[create_ipam_pool(subnet="132.124.0.0/16")],
+ pool_configs=[IPAMPool(subnet="132.124.0.0/16")],
),
)
container = self.client.create_container(
- image='busybox', command='top',
+ image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -250,13 +245,13 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.22')
def test_create_with_ipv6_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
- pool_configs=[create_ipam_pool(subnet="2001:389::1/64")],
+ pool_configs=[IPAMPool(subnet="2001:389::1/64")],
),
)
container = self.client.create_container(
- image='busybox', command='top',
+ image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -279,7 +274,7 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
container = self.client.create_container(
- 'busybox', 'top',
+ BUSYBOX, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
@@ -357,10 +352,10 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.22')
def test_connect_with_ipv4_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="172.28.0.0/16", iprange="172.28.5.0/24",
gateway="172.28.5.254"
)
@@ -385,10 +380,10 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.22')
def test_connect_with_ipv6_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="2001:389::1/64", iprange="2001:389::0/96",
gateway="2001:389::ffff"
)
@@ -436,6 +431,37 @@ class TestNetworks(helpers.BaseTestCase):
@requires_api_version('1.23')
def test_create_network_ipv6_enabled(self):
- _, net_id = self.create_network(enable_ipv6=True)
+ _, net_id = self.create_network(
+ enable_ipv6=True, ipam=IPAMConfig(
+ driver='default',
+ pool_configs=[
+ IPAMPool(
+ subnet="2001:389::1/64", iprange="2001:389::0/96",
+ gateway="2001:389::ffff"
+ )
+ ]
+ )
+ )
net = self.client.inspect_network(net_id)
assert net['EnableIPv6'] is True
+
+ @requires_api_version('1.25')
+ def test_create_network_attachable(self):
+ assert self.client.init_swarm('eth0')
+ _, net_id = self.create_network(driver='overlay', attachable=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Attachable'] is True
+
+ @requires_api_version('1.29')
+ def test_create_network_ingress(self):
+ assert self.client.init_swarm('eth0')
+ self.client.remove_network('ingress')
+ _, net_id = self.create_network(driver='overlay', ingress=True)
+ net = self.client.inspect_network(net_id)
+ assert net['Ingress'] is True
+
+ @requires_api_version('1.25')
+ def test_prune_networks(self):
+ net_name, _ = self.create_network()
+ result = self.client.prune_networks()
+ assert net_name in result['NetworksDeleted']
diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py
new file mode 100644
index 0000000..433d44d
--- /dev/null
+++ b/tests/integration/api_plugin_test.py
@@ -0,0 +1,145 @@
+import os
+
+import docker
+import pytest
+
+from .base import BaseAPIIntegrationTest, TEST_API_VERSION
+from ..helpers import requires_api_version
+
+SSHFS = 'vieux/sshfs:latest'
+
+
+@requires_api_version('1.25')
+class PluginTest(BaseAPIIntegrationTest):
+ @classmethod
+ def teardown_class(cls):
+ c = docker.APIClient(
+ version=TEST_API_VERSION, timeout=60,
+ **docker.utils.kwargs_from_env()
+ )
+ try:
+ c.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def teardown_method(self, method):
+ try:
+ self.client.disable_plugin(SSHFS)
+ except docker.errors.APIError:
+ pass
+
+ for p in self.tmp_plugins:
+ try:
+ self.client.remove_plugin(p, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def ensure_plugin_installed(self, plugin_name):
+ try:
+ return self.client.inspect_plugin(plugin_name)
+ except docker.errors.NotFound:
+ prv = self.client.plugin_privileges(plugin_name)
+ for d in self.client.pull_plugin(plugin_name, prv):
+ pass
+ return self.client.inspect_plugin(plugin_name)
+
+ def test_enable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ with pytest.raises(docker.errors.APIError):
+ self.client.enable_plugin(SSHFS)
+
+ def test_disable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ self.client.disable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is False
+ with pytest.raises(docker.errors.APIError):
+ self.client.disable_plugin(SSHFS)
+
+ def test_inspect_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.inspect_plugin(SSHFS)
+ assert 'Config' in data
+ assert 'Name' in data
+ assert data['Name'] == SSHFS
+
+ def test_plugin_privileges(self):
+ prv = self.client.plugin_privileges(SSHFS)
+ assert isinstance(prv, list)
+ for item in prv:
+ assert 'Name' in item
+ assert 'Value' in item
+ assert 'Description' in item
+
+ def test_list_plugins(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.plugins()
+ assert len(data) > 0
+ plugin = [p for p in data if p['Name'] == SSHFS][0]
+ assert 'Config' in plugin
+
+ def test_configure_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ self.client.configure_plugin(SSHFS, {
+ 'DEBUG': '1'
+ })
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'Env' in pl_data['Settings']
+ assert 'DEBUG=1' in pl_data['Settings']['Env']
+
+ self.client.configure_plugin(SSHFS, ['DEBUG=0'])
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'DEBUG=0' in pl_data['Settings']['Env']
+
+ def test_remove_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.remove_plugin(SSHFS) is True
+
+ def test_force_remove_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ self.client.enable_plugin(SSHFS)
+ assert self.client.inspect_plugin(SSHFS)['Enabled'] is True
+ assert self.client.remove_plugin(SSHFS, force=True) is True
+
+ def test_install_plugin(self):
+ try:
+ self.client.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.pull_plugin(SSHFS, prv)]
+        # filter() is lazy (and always truthy) on Python 3; check explicitly.
+        assert any(d.get('status') == 'Download complete' for d in logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ @requires_api_version('1.26')
+ def test_upgrade_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.upgrade_plugin(SSHFS, SSHFS, prv)]
+        assert any(d.get('status') == 'Download complete' for d in logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ def test_create_plugin(self):
+ plugin_data_dir = os.path.join(
+ os.path.dirname(__file__), 'testdata/dummy-plugin'
+ )
+ assert self.client.create_plugin(
+ 'docker-sdk-py/dummy', plugin_data_dir
+ )
+ self.tmp_plugins.append('docker-sdk-py/dummy')
+ data = self.client.inspect_plugin('docker-sdk-py/dummy')
+ assert data['Config']['Entrypoint'] == ['/dummy']
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
new file mode 100644
index 0000000..dcd880f
--- /dev/null
+++ b/tests/integration/api_secret_test.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.25')
+class SecretAPITest(BaseAPIIntegrationTest):
+ def setUp(self):
+ super(SecretAPITest, self).setUp()
+ self.init_swarm()
+
+ def tearDown(self):
+ super(SecretAPITest, self).tearDown()
+ force_leave_swarm(self.client)
+
+ def test_create_secret(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_secret_unicode_data(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == secret_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ assert self.client.remove_secret(secret_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_secret(secret_id)
+
+ def test_list_secrets(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ data = self.client.secrets(filters={'names': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == secret_id['ID']
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
new file mode 100644
index 0000000..54111a7
--- /dev/null
+++ b/tests/integration/api_service_test.py
@@ -0,0 +1,478 @@
+# -*- coding: utf-8 -*-
+
+import random
+import time
+
+import docker
+import six
+
+from ..helpers import (
+ force_leave_swarm, requires_api_version, requires_experimental
+)
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+
+class ServiceTest(BaseAPIIntegrationTest):
+ def setUp(self):
+ super(ServiceTest, self).setUp()
+ force_leave_swarm(self.client)
+ self.init_swarm()
+
+ def tearDown(self):
+ super(ServiceTest, self).tearDown()
+ for service in self.client.services(filters={'name': 'dockerpytest_'}):
+ try:
+ self.client.remove_service(service['ID'])
+ except docker.errors.APIError:
+ pass
+ force_leave_swarm(self.client)
+
+ def get_service_name(self):
+ return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+ def get_service_container(self, service_name, attempts=20, interval=0.5,
+ include_stopped=False):
+ # There is some delay between the service's creation and the creation
+ # of the service's containers. This method deals with the uncertainty
+ # when trying to retrieve the container associated with a service.
+ while True:
+ containers = self.client.containers(
+ filters={'name': [service_name]}, quiet=True,
+ all=include_stopped
+ )
+ if len(containers) > 0:
+ return containers[0]
+ attempts -= 1
+ if attempts <= 0:
+ return None
+ time.sleep(interval)
+
+ def create_simple_service(self, name=None):
+ if name:
+ name = 'dockerpytest_{0}'.format(name)
+ else:
+ name = self.get_service_name()
+
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ return name, self.client.create_service(task_tmpl, name=name)
+
+ @requires_api_version('1.24')
+ def test_list_services(self):
+ services = self.client.services()
+ assert isinstance(services, list)
+
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+ self.create_simple_service()
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 1
+ assert 'dockerpytest_' in test_services[0]['Spec']['Name']
+
+ def test_inspect_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ def test_inspect_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ svc_info = self.client.inspect_service(svc_name)
+ assert 'ID' in svc_info
+ assert svc_info['ID'] == svc_id['ID']
+
+ def test_remove_service_by_id(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_id)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_remove_service_by_name(self):
+ svc_name, svc_id = self.create_simple_service()
+ assert self.client.remove_service(svc_name)
+ test_services = self.client.services(filters={'name': 'dockerpytest_'})
+ assert len(test_services) == 0
+
+ def test_create_service_simple(self):
+ name, svc_id = self.create_simple_service()
+ assert self.client.inspect_service(svc_id)
+ services = self.client.services(filters={'name': name})
+ assert len(services) == 1
+ assert services[0]['ID'] == svc_id['ID']
+
+ @requires_api_version('1.25')
+ @requires_experimental(until='1.29')
+ def test_service_logs(self):
+ name, svc_id = self.create_simple_service()
+ assert self.get_service_container(name, include_stopped=True)
+ attempts = 20
+ while True:
+ if attempts == 0:
+ self.fail('No service logs produced by endpoint')
+ logs = self.client.service_logs(svc_id, stdout=True, is_tty=False)
+ try:
+ log_line = next(logs)
+ except StopIteration:
+ attempts -= 1
+ time.sleep(0.1)
+ continue
+ else:
+ break
+
+ if six.PY3:
+ log_line = log_line.decode('utf-8')
+ assert 'hello\n' in log_line
+
+ def test_create_service_custom_log_driver(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ log_cfg = docker.types.DriverConfig('none')
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, log_driver=log_cfg
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'LogDriver' in res_template
+ assert 'Name' in res_template['LogDriver']
+ assert res_template['LogDriver']['Name'] == 'none'
+
+ def test_create_service_with_volume_mount(self):
+ vol_name = self.get_service_name()
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['ls'],
+ mounts=[
+ docker.types.Mount(target='/test', source=vol_name)
+ ]
+ )
+ self.tmp_volumes.append(vol_name)
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Mounts' in cspec
+ assert len(cspec['Mounts']) == 1
+ mount = cspec['Mounts'][0]
+ assert mount['Target'] == '/test'
+ assert mount['Source'] == vol_name
+ assert mount['Type'] == 'volume'
+
+ def test_create_service_with_resources_constraints(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ resources = docker.types.Resources(
+ cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
+ cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, resources=resources
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ res_template = svc_info['Spec']['TaskTemplate']
+ assert 'Resources' in res_template
+ assert res_template['Resources']['Limits'] == resources['Limits']
+ assert res_template['Resources']['Reservations'] == resources[
+ 'Reservations'
+ ]
+
+ def test_create_service_with_update_config(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ parallelism=10, delay=5, failure_action='pause'
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Parallelism'] == uc['Parallelism']
+ assert update_config['Delay'] == uc['Delay']
+ assert update_config['FailureAction'] == uc['FailureAction']
+
+ @requires_api_version('1.25')
+ def test_create_service_with_update_config_monitor(self):
+ container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ monitor=300000000, max_failure_ratio=0.4
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Monitor'] == uc['Monitor']
+ assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
+
+ def test_create_service_with_restart_policy(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ policy = docker.types.RestartPolicy(
+ docker.types.RestartPolicy.condition_types.ANY,
+ delay=5, max_attempts=5
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, restart_policy=policy
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
+ assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
+
+ def test_create_service_with_custom_networks(self):
+ net1 = self.client.create_network(
+ 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net1['Id'])
+ net2 = self.client.create_network(
+ 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
+ )
+ self.tmp_networks.append(net2['Id'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, networks=[
+ 'dockerpytest_1', {'Target': 'dockerpytest_2'}
+ ]
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Networks' in svc_info['Spec']
+ assert svc_info['Spec']['Networks'] == [
+ {'Target': net1['Id']}, {'Target': net2['Id']}
+ ]
+
+ def test_create_service_with_placement(self):
+ node_id = self.client.nodes()[0]['ID']
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=['node.id=={}'.format(node_id)]
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
+ {'Constraints': ['node.id=={}'.format(node_id)]})
+
+ def test_create_service_with_placement_object(self):
+ node_id = self.client.nodes()[0]['ID']
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(
+ constraints=['node.id=={}'.format(node_id)]
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.30')
+ def test_create_service_with_placement_platform(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ @requires_api_version('1.27')
+ def test_create_service_with_placement_preferences(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ placemt = docker.types.Placement(preferences=[
+ {'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
+ ])
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec, placement=placemt
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Placement' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
+
+ def test_create_service_with_endpoint_spec(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ endpoint_spec = docker.types.EndpointSpec(ports={
+ 12357: (1990, 'udp'),
+ 12562: (678,),
+ 53243: 8080,
+ })
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, endpoint_spec=endpoint_spec
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ ports = svc_info['Spec']['EndpointSpec']['Ports']
+ for port in ports:
+ if port['PublishedPort'] == 12562:
+ assert port['TargetPort'] == 678
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 53243:
+ assert port['TargetPort'] == 8080
+ assert port['Protocol'] == 'tcp'
+ elif port['PublishedPort'] == 12357:
+ assert port['TargetPort'] == 1990
+ assert port['Protocol'] == 'udp'
+ else:
+ self.fail('Invalid port specification: {0}'.format(port))
+
+ assert len(ports) == 3
+
+ def test_create_service_with_env(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
+ )
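+ # The env mapping is serialized into the spec as a list of
+ # KEY=value strings.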
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'Env' in con_spec
+ assert con_spec['Env'] == ['DOCKER_PY_TEST=1']
+
+ @requires_api_version('1.25')
+ def test_create_service_with_tty(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['true'], tty=True
+ )
+ task_tmpl = docker.types.TaskTemplate(
+ container_spec,
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
+ con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ assert 'TTY' in con_spec
+ assert con_spec['TTY'] is True
+
+ def test_create_service_global_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, name=name, mode='global'
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Global' in svc_info['Spec']['Mode']
+
+ def test_create_service_replicated_mode(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
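+ # ServiceMode('replicated', 5) serializes to
+ # {'Replicated': {'Replicas': 5}} in the service spec.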
+ svc_id = self.client.create_service(
+ task_tmpl, name=name,
+ mode=docker.types.ServiceMode('replicated', 5)
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Mode' in svc_info['Spec']
+ assert 'Replicated' in svc_info['Spec']['Mode']
+ assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5}
+
+ @requires_api_version('1.25')
+ def test_update_service_force_update(self):
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0
+ version_index = svc_info['Version']['Index']
+
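+ # Service updates use optimistic locking: the current version
+ # index must be supplied and is bumped on every successful update.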
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self.client.update_service(name, version_index, task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10
+
+ @requires_api_version('1.25')
+ def test_create_service_with_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = b'phantasmagoria of flower view'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
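+ # The reference attaches the secret to the service; the engine
+ # mounts it at /run/secrets/<name> inside each task.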
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ assert self.client.exec_start(exec_id) == secret_data
+
+ @requires_api_version('1.25')
+ def test_create_service_with_unicode_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = u'東方花映塚'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ 'busybox', ['sleep', '999'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ container_secret = self.client.exec_start(exec_id)
+ container_secret = container_secret.decode('utf-8')
+ assert container_secret == secret_data
diff --git a/tests/integration/swarm_test.py b/tests/integration/api_swarm_test.py
index 128628e..666c689 100644
--- a/tests/integration/swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -1,63 +1,53 @@
+import copy
import docker
import pytest
-from ..base import requires_api_version
-from .. import helpers
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
-BUSYBOX = helpers.BUSYBOX
-
-
-class SwarmTest(helpers.BaseTestCase):
+class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
+ force_leave_swarm(self.client)
def tearDown(self):
super(SwarmTest, self).tearDown()
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
+ force_leave_swarm(self.client)
@requires_api_version('1.24')
def test_init_swarm_simple(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_force_new_cluster(self):
pytest.skip('Test stalls the engine on 1.12.0')
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
version_1 = self.client.inspect_swarm()['Version']['Index']
- assert self.client.init_swarm('eth0', force_new_cluster=True)
+ assert self.client.init_swarm(force_new_cluster=True)
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
with pytest.raises(docker.errors.APIError):
- self.client.init_swarm('eth0')
+ self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_custom_raft_spec(self):
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
- assert self.client.init_swarm(
- advertise_addr='eth0', swarm_spec=spec
- )
+ assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
@requires_api_version('1.24')
def test_leave_swarm(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.leave_swarm()
assert exc_info.value.response.status_code == 500
@@ -65,10 +55,11 @@ class SwarmTest(helpers.BaseTestCase):
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.inspect_swarm()
assert exc_info.value.response.status_code == 406
+ assert self.client.leave_swarm(force=True)
@requires_api_version('1.24')
def test_update_swarm(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200,
@@ -99,7 +90,7 @@ class SwarmTest(helpers.BaseTestCase):
@requires_api_version('1.24')
def test_update_swarm_name(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
node_cert_expiry=7776000000000000, name='reimuhakurei'
@@ -117,7 +108,7 @@ class SwarmTest(helpers.BaseTestCase):
@requires_api_version('1.24')
def test_list_nodes(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
@@ -136,10 +127,50 @@ class SwarmTest(helpers.BaseTestCase):
@requires_api_version('1.24')
def test_inspect_node(self):
- assert self.client.init_swarm('eth0')
+ assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
assert node['Version'] == node_data['Version']
+
+ @requires_api_version('1.24')
+ def test_update_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ node = nodes_list[0]
+ orig_spec = node['Spec']
+
+ # add a new label
+ new_spec = copy.deepcopy(orig_spec)
+ new_spec['Labels'] = {'new.label': 'new value'}
+ self.client.update_node(node_id=node['ID'],
+ version=node['Version']['Index'],
+ node_spec=new_spec)
+ updated_node = self.client.inspect_node(node['ID'])
+ assert new_spec == updated_node['Spec']
+
+ # Revert the changes
+ self.client.update_node(node_id=node['ID'],
+ version=updated_node['Version']['Index'],
+ node_spec=orig_spec)
+ reverted_node = self.client.inspect_node(node['ID'])
+ assert orig_spec == reverted_node['Spec']
+
+ @requires_api_version('1.24')
+ def test_remove_main_node(self):
+ assert self.init_swarm()
+ nodes_list = self.client.nodes()
+ node_id = nodes_list[0]['ID']
+ with pytest.raises(docker.errors.NotFound):
+ self.client.remove_node('foobar01')
+ with pytest.raises(docker.errors.APIError) as e:
+ self.client.remove_node(node_id)
+
+ assert e.value.response.status_code >= 400
+
+ with pytest.raises(docker.errors.APIError) as e:
+ self.client.remove_node(node_id, force=True)
+
+ assert e.value.response.status_code >= 400
diff --git a/tests/integration/volume_test.py b/tests/integration/api_volume_test.py
index 8fa2dab..5a4bb1e 100644
--- a/tests/integration/volume_test.py
+++ b/tests/integration/api_volume_test.py
@@ -1,12 +1,12 @@
import docker
import pytest
-from .. import helpers
-from ..base import requires_api_version
+from ..helpers import requires_api_version
+from .base import BaseAPIIntegrationTest
@requires_api_version('1.21')
-class TestVolumes(helpers.BaseTestCase):
+class TestVolumes(BaseAPIIntegrationTest):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
@@ -49,6 +49,21 @@ class TestVolumes(helpers.BaseTestCase):
self.client.create_volume(name)
self.client.remove_volume(name)
+ @requires_api_version('1.25')
+ def test_force_remove_volume(self):
+ name = 'shootthebullet'
+ self.tmp_volumes.append(name)
+ self.client.create_volume(name)
+ self.client.remove_volume(name, force=True)
+
+ @requires_api_version('1.25')
+ def test_prune_volumes(self):
+ name = 'hopelessmasquerade'
+ self.client.create_volume(name)
+ self.tmp_volumes.append(name)
+ result = self.client.prune_volumes()
+ assert name in result['VolumesDeleted']
+
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
diff --git a/tests/integration/base.py b/tests/integration/base.py
new file mode 100644
index 0000000..3c01689
--- /dev/null
+++ b/tests/integration/base.py
@@ -0,0 +1,114 @@
+import os
+import shutil
+import unittest
+
+import docker
+from docker.utils import kwargs_from_env
+import six
+
+from .. import helpers
+
+BUSYBOX = 'busybox:buildroot-2014.02'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
+
+
+class BaseIntegrationTest(unittest.TestCase):
+ """
+ A base class for integration test cases. It cleans up the Docker server
+ after itself.
+ """
+
+ def setUp(self):
+ if six.PY2:
+ self.assertRegex = self.assertRegexpMatches
+ self.assertCountEqual = self.assertItemsEqual
+ self.tmp_imgs = []
+ self.tmp_containers = []
+ self.tmp_folders = []
+ self.tmp_volumes = []
+ self.tmp_networks = []
+ self.tmp_plugins = []
+ self.tmp_secrets = []
+
+ def tearDown(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ for img in self.tmp_imgs:
+ try:
+ client.api.remove_image(img)
+ except docker.errors.APIError:
+ pass
+ for container in self.tmp_containers:
+ try:
+ client.api.remove_container(container, force=True)
+ except docker.errors.APIError:
+ pass
+ for network in self.tmp_networks:
+ try:
+ client.api.remove_network(network)
+ except docker.errors.APIError:
+ pass
+ for volume in self.tmp_volumes:
+ try:
+ client.api.remove_volume(volume)
+ except docker.errors.APIError:
+ pass
+
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+
+
+class BaseAPIIntegrationTest(BaseIntegrationTest):
+ """
+ A test case for `APIClient` integration tests. It sets up an `APIClient`
+ as `self.client`.
+ """
+
+ def setUp(self):
+ super(BaseAPIIntegrationTest, self).setUp()
+ self.client = docker.APIClient(
+ version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
+ )
+
+ def tearDown(self):
+ super(BaseAPIIntegrationTest, self).tearDown()
+ self.client.close()
+
+ def run_container(self, *args, **kwargs):
+ container = self.client.create_container(*args, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ exitcode = self.client.wait(container)
+
+ if exitcode != 0:
+ output = self.client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}"
+ .format(exitcode, output))
+
+ return container
+
+ def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
+ container = self.client.create_container(
+ image=image, command=command, **kwargs)
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ return container
+
+ def execute(self, container, cmd, exit_code=0, **kwargs):
+ exc = self.client.exec_create(container, cmd, **kwargs)
+ output = self.client.exec_start(exc)
+ actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
+ msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
+ " ".join(cmd), exit_code, actual_exit_code, output)
+ assert actual_exit_code == exit_code, msg
+
+ def init_swarm(self, **kwargs):
+ return self.client.init_swarm(
+ 'eth0', listen_addr=helpers.swarm_listen_addr(), **kwargs
+ )
diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py
new file mode 100644
index 0000000..8f6bd86
--- /dev/null
+++ b/tests/integration/client_test.py
@@ -0,0 +1,29 @@
+import unittest
+
+import docker
+
+from ..helpers import requires_api_version
+from .base import TEST_API_VERSION
+
+
+class ClientTest(unittest.TestCase):
+ client = docker.from_env(version=TEST_API_VERSION)
+
+ def test_info(self):
+ info = self.client.info()
+ assert 'ID' in info
+ assert 'Name' in info
+
+ def test_ping(self):
+ assert self.client.ping() is True
+
+ def test_version(self):
+ assert 'Version' in self.client.version()
+
+ @requires_api_version('1.25')
+ def test_df(self):
+ data = self.client.df()
+ assert 'LayersSize' in data
+ assert 'Containers' in data
+ assert 'Volumes' in data
+ assert 'Images' in data
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index e65dd1d..4e8d268 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -4,16 +4,16 @@ import sys
import warnings
import docker.errors
+from docker.utils import kwargs_from_env
import pytest
-from ..helpers import BUSYBOX
-from ..helpers import docker_client
+from .base import BUSYBOX
@pytest.fixture(autouse=True, scope='session')
def setup_test_session():
warnings.simplefilter('error')
- c = docker_client()
+ c = docker.APIClient(version='auto', **kwargs_from_env())
try:
c.inspect_image(BUSYBOX)
except docker.errors.NotFound:
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
new file mode 100644
index 0000000..dc5cef4
--- /dev/null
+++ b/tests/integration/errors_test.py
@@ -0,0 +1,14 @@
+from docker.errors import APIError
+from .base import BaseAPIIntegrationTest, BUSYBOX
+
+
+class ErrorsTest(BaseAPIIntegrationTest):
+ def test_api_error_parses_json(self):
+ container = self.client.create_container(BUSYBOX, ['sleep', '10'])
+ self.client.start(container['Id'])
+ with self.assertRaises(APIError) as cm:
+ self.client.remove_container(container['Id'])
+ explanation = cm.exception.explanation
+ assert 'You cannot remove a running container' in explanation
+ assert '{"message":' not in explanation
+ self.client.remove_container(container['Id'], force=True)
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
new file mode 100644
index 0000000..b76a88f
--- /dev/null
+++ b/tests/integration/models_containers_test.py
@@ -0,0 +1,260 @@
+import docker
+import tempfile
+from .base import BaseIntegrationTest, TEST_API_VERSION
+from ..helpers import random_name
+
+
+class ContainerCollectionTest(BaseIntegrationTest):
+
+ def test_run(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ self.assertEqual(
+ client.containers.run("alpine", "echo hello world", remove=True),
+ b'hello world\n'
+ )
+
+ def test_run_detach(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['Config']['Image'] == "alpine"
+ assert container.attrs['Config']['Cmd'] == ['sleep', '300']
+
+ def test_run_with_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with self.assertRaises(docker.errors.ContainerError) as cm:
+ client.containers.run("alpine", "cat /test", remove=True)
+ assert cm.exception.exit_status == 1
+ assert "cat /test" in str(cm.exception)
+ assert "alpine" in str(cm.exception)
+ assert "No such file or directory" in str(cm.exception)
+
+ def test_run_with_image_that_does_not_exist(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with self.assertRaises(docker.errors.ImageNotFound):
+ client.containers.run("dockerpytest_does_not_exist")
+
+ def test_run_with_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ path = tempfile.mkdtemp()
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["%s:/insidecontainer" % path],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["%s:/insidecontainer" % path]
+ )
+ self.assertEqual(out, b'hello\n')
+
+ def test_run_with_named_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create(name="somevolume")
+ self.tmp_volumes.append(volume.id)
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["somevolume:/insidecontainer"],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["somevolume:/insidecontainer"]
+ )
+ self.assertEqual(out, b'hello\n')
+
+ def test_run_with_network(self):
+ net_name = random_name()
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.networks.create(net_name)
+ self.tmp_networks.append(net_name)
+
+ container = client.containers.run(
+ 'alpine', 'echo hello world', network=net_name,
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+
+ attrs = container.attrs
+
+ assert 'NetworkSettings' in attrs
+ assert 'Networks' in attrs['NetworkSettings']
+ assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert client.containers.get(container.id).attrs[
+ 'Config']['Image'] == "alpine"
+
+ def test_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list() if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Config']['Image'] == 'alpine'
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+
+class ContainerTest(BaseIntegrationTest):
+
+ def test_commit(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test'",
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+ image = container.commit()
+ self.assertEqual(
+ client.containers.run(image.id, "cat /test", remove=True),
+ b"hello\n"
+ )
+
+ def test_diff(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "touch /test", detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.diff() == [{'Path': '/test', 'Kind': 1}]
+
+ def test_exec_run(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ assert container.exec_run("cat /test") == b"hello\n"
+
+ def test_kill(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ while container.status != 'running':
+ container.reload()
+ assert container.status == 'running'
+ container.kill()
+ container.reload()
+ assert container.status == 'exited'
+
+ def test_logs(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello world",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.logs() == b"hello world\n"
+
+ def test_pause(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ container.pause()
+ container.reload()
+ assert container.status == "paused"
+ container.unpause()
+ container.reload()
+ assert container.status == "running"
+
+ def test_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.id in [c.id for c in client.containers.list(all=True)]
+ container.wait()
+ container.remove()
+ containers = client.containers.list(all=True)
+ assert container.id not in [c.id for c in containers]
+
+ def test_rename(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "echo hello", name="test1",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.name == "test1"
+ container.rename("test2")
+ container.reload()
+ assert container.name == "test2"
+
+ def test_restart(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.restart()
+ container.reload()
+ second_started_at = container.attrs['State']['StartedAt']
+ assert first_started_at != second_started_at
+
+ def test_start(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.create("alpine", "sleep 50", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status == "created"
+ container.start()
+ container.reload()
+ assert container.status == "running"
+
+ def test_stats(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ stats = container.stats(stream=False)
+ for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
+ 'memory_stats', 'blkio_stats']:
+ assert key in stats
+
+ def test_stop(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "top", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status in ("running", "created")
+ container.stop(timeout=2)
+ container.reload()
+ assert container.status == "exited"
+
+ def test_top(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 60", detach=True)
+ self.tmp_containers.append(container.id)
+ top = container.top()
+ assert len(top['Processes']) == 1
+ assert 'sleep 60' in top['Processes'][0]
+
+ def test_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 60", detach=True,
+ cpu_shares=2)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['HostConfig']['CpuShares'] == 2
+ container.update(cpu_shares=3)
+ container.reload()
+ assert container.attrs['HostConfig']['CpuShares'] == 3
+
+ def test_wait(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sh -c 'exit 0'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait() == 0
+ container = client.containers.run("alpine", "sh -c 'exit 1'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait() == 1
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
new file mode 100644
index 0000000..8f812d9
--- /dev/null
+++ b/tests/integration/models_images_test.py
@@ -0,0 +1,99 @@
+import io
+
+import docker
+import pytest
+
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class ImageCollectionTest(BaseIntegrationTest):
+
+ def test_build(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ ))
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ @pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
+ def test_build_with_error(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ with self.assertRaises(docker.errors.BuildError) as cm:
+ client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "NOTADOCKERFILECOMMAND".encode('ascii')
+ ))
+ assert str(cm.exception) == ("Unknown instruction: "
+ "NOTADOCKERFILECOMMAND")
+
+ def test_build_with_multiple_success(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.build(
+ tag='some-tag', fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ )
+ )
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ def test_build_with_success_build_output(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.build(
+ tag='dup-txt-tag', fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo Successfully built abcd1234".encode('ascii')
+ )
+ )
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"Successfully built abcd1234\n"
+
+ def test_list(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list())
+
+ def test_list_with_repository(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list('alpine'))
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+ def test_pull(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+ assert 'alpine:latest' in image.attrs['RepoTags']
+
+ def test_pull_with_tag(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine', tag='3.3')
+ assert 'alpine:3.3' in image.attrs['RepoTags']
+
+
+class ImageTest(BaseIntegrationTest):
+
+ def test_tag_and_remove(self):
+ repo = 'dockersdk.tests.images.test_tag'
+ tag = 'some-tag'
+ identifier = '{}:{}'.format(repo, tag)
+
+ client = docker.from_env(version=TEST_API_VERSION)
+ image = client.images.pull('alpine:latest')
+
+ result = image.tag(repo, tag)
+ assert result is True
+ self.tmp_imgs.append(identifier)
+ assert image.id in get_ids(client.images.list(repo))
+ assert image.id in get_ids(client.images.list(identifier))
+
+ client.images.remove(identifier)
+ assert image.id not in get_ids(client.images.list(repo))
+ assert image.id not in get_ids(client.images.list(identifier))
+
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+
+def get_ids(images):
+ return [i.id for i in images]
diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py
new file mode 100644
index 0000000..105dcc5
--- /dev/null
+++ b/tests/integration/models_networks_test.py
@@ -0,0 +1,64 @@
+import docker
+from .. import helpers
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class NetworkCollectionTest(BaseIntegrationTest):
+
+ def test_create(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(name, labels={'foo': 'bar'})
+ self.tmp_networks.append(network.id)
+ assert network.name == name
+ assert network.attrs['Labels']['foo'] == "bar"
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network_id = client.networks.create(name).id
+ self.tmp_networks.append(network_id)
+ network = client.networks.get(network_id)
+ assert network.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ network = client.networks.create(name)
+ self.tmp_networks.append(network.id)
+ assert network.id in [n.id for n in client.networks.list()]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(ids=["fdhjklfdfdshjkfds"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(ids=[network.id])
+ ]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(names=["fdshjklfdsjhkl"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(names=[name])
+ ]
+ network.remove()
+ assert network.id not in [n.id for n in client.networks.list()]
+
+
+class NetworkTest(BaseIntegrationTest):
+
+ def test_connect_disconnect(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ network = client.networks.create(helpers.random_name())
+ self.tmp_networks.append(network.id)
+ container = client.containers.create("alpine", "sleep 300")
+ self.tmp_containers.append(container.id)
+ assert network.containers == []
+ network.connect(container)
+ container.start()
+ assert client.networks.get(network.id).containers == [container]
+ network.disconnect(container)
+ assert network.containers == []
+ assert client.networks.get(network.id).containers == []
diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py
new file mode 100644
index 0000000..5823e6b
--- /dev/null
+++ b/tests/integration/models_nodes_test.py
@@ -0,0 +1,37 @@
+import unittest
+
+import docker
+
+from .. import helpers
+from .base import TEST_API_VERSION
+
+
+class NodesTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_list_get_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
+ nodes = client.nodes.list()
+ assert len(nodes) == 1
+ assert nodes[0].attrs['Spec']['Role'] == 'manager'
+
+ node = client.nodes.get(nodes[0].id)
+ assert node.id == nodes[0].id
+ assert node.attrs['Spec']['Role'] == 'manager'
+ assert node.version > 0
+
+ node = client.nodes.list()[0]
+ assert not node.attrs['Spec'].get('Labels')
+ node.update({
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ })
+ node.reload()
+ assert node.attrs['Spec']['Labels'] == {'foo': 'bar'}
diff --git a/tests/integration/models_resources_test.py b/tests/integration/models_resources_test.py
new file mode 100644
index 0000000..4aafe0c
--- /dev/null
+++ b/tests/integration/models_resources_test.py
@@ -0,0 +1,16 @@
+import docker
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class ModelTest(BaseIntegrationTest):
+
+ def test_reload(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.kill()
+ container.start()
+ assert container.attrs['State']['StartedAt'] == first_started_at
+ container.reload()
+ assert container.attrs['State']['StartedAt'] != first_started_at
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
new file mode 100644
index 0000000..9b5676d
--- /dev/null
+++ b/tests/integration/models_services_test.py
@@ -0,0 +1,103 @@
+import unittest
+
+import docker
+import pytest
+
+from .. import helpers
+from .base import TEST_API_VERSION
+
+
+class ServiceTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ client = docker.from_env(version=TEST_API_VERSION)
+ helpers.force_leave_swarm(client)
+ client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
+
+ @classmethod
+ def tearDownClass(cls):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_create(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ # create arguments
+ name=name,
+ labels={'foo': 'bar'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'}
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert "alpine" in container_spec['Image']
+ assert container_spec['Labels'] == {'container': 'label'}
+
+ def test_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ image="alpine",
+ command="sleep 300"
+ )
+ service = client.services.get(service.id)
+ assert service.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ assert service in client.services.list()
+ service.remove()
+ assert service not in client.services.list()
+
+ def test_tasks(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service1 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ service2 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service1.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service1.id
+
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service2.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service2.id
+
+ @pytest.mark.skip(reason="Makes Swarm unstable?")
+ def test_update(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ service.update(
+ # create argument
+ name=service.name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Command'] == ["sleep", "600"]
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
new file mode 100644
index 0000000..e45ff3c
--- /dev/null
+++ b/tests/integration/models_swarm_test.py
@@ -0,0 +1,33 @@
+import unittest
+
+import docker
+
+from .. import helpers
+from .base import TEST_API_VERSION
+
+
+class SwarmTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
+
+ def test_init_update_leave(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init(
+ advertise_addr='eth0', snapshot_interval=5000,
+ listen_addr=helpers.swarm_listen_addr()
+ )
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
+ client.swarm.update(snapshot_interval=10000)
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
+ assert client.swarm.leave(force=True)
+ with self.assertRaises(docker.errors.APIError) as cm:
+ client.swarm.reload()
+ assert (
+ # FIXME: test for both until
+ # https://github.com/docker/docker/issues/29192 is resolved
+ cm.exception.response.status_code == 406 or
+ cm.exception.response.status_code == 503
+ )
diff --git a/tests/integration/models_volumes_test.py b/tests/integration/models_volumes_test.py
new file mode 100644
index 0000000..47b4a45
--- /dev/null
+++ b/tests/integration/models_volumes_test.py
@@ -0,0 +1,30 @@
+import docker
+from .base import BaseIntegrationTest, TEST_API_VERSION
+
+
+class VolumesTest(BaseIntegrationTest):
+ def test_create_get(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create(
+ 'dockerpytest_1',
+ driver='local',
+ labels={'labelkey': 'labelvalue'}
+ )
+ self.tmp_volumes.append(volume.id)
+ assert volume.id
+ assert volume.name == 'dockerpytest_1'
+ assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'}
+
+ volume = client.volumes.get(volume.id)
+ assert volume.name == 'dockerpytest_1'
+
+ def test_list_remove(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ volume = client.volumes.create('dockerpytest_1')
+ self.tmp_volumes.append(volume.id)
+ assert volume in client.volumes.list()
+ assert volume in client.volumes.list(filters={'name': 'dockerpytest_'})
+ assert volume not in client.volumes.list(filters={'name': 'foobar'})
+
+ volume.remove()
+ assert volume not in client.volumes.list()
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index 8b321cf..e3e6d9b 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -4,12 +4,10 @@ import random
import docker
import six
-from .. import helpers
+from .base import BaseAPIIntegrationTest, BUSYBOX
-BUSYBOX = helpers.BUSYBOX
-
-class TestRegressions(helpers.BaseTestCase):
+class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
with self.assertRaises(docker.errors.APIError) as exc:
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
deleted file mode 100644
index 2b99316..0000000
--- a/tests/integration/service_test.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import random
-
-import docker
-
-from ..base import requires_api_version
-from .. import helpers
-
-
-BUSYBOX = helpers.BUSYBOX
-
-
-class ServiceTest(helpers.BaseTestCase):
- def setUp(self):
- super(ServiceTest, self).setUp()
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
- self.client.init_swarm('eth0')
-
- def tearDown(self):
- super(ServiceTest, self).tearDown()
- for service in self.client.services(filters={'name': 'dockerpytest_'}):
- try:
- self.client.remove_service(service['ID'])
- except docker.errors.APIError:
- pass
- try:
- self.client.leave_swarm(force=True)
- except docker.errors.APIError:
- pass
-
- def get_service_name(self):
- return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
-
- def create_simple_service(self, name=None):
- if name:
- name = 'dockerpytest_{0}'.format(name)
- else:
- name = self.get_service_name()
-
- container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
- )
- task_tmpl = docker.types.TaskTemplate(container_spec)
- return name, self.client.create_service(task_tmpl, name=name)
-
- @requires_api_version('1.24')
- def test_list_services(self):
- services = self.client.services()
- assert isinstance(services, list)
-
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 0
- self.create_simple_service()
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 1
- assert 'dockerpytest_' in test_services[0]['Spec']['Name']
-
- def test_inspect_service_by_id(self):
- svc_name, svc_id = self.create_simple_service()
- svc_info = self.client.inspect_service(svc_id)
- assert 'ID' in svc_info
- assert svc_info['ID'] == svc_id['ID']
-
- def test_inspect_service_by_name(self):
- svc_name, svc_id = self.create_simple_service()
- svc_info = self.client.inspect_service(svc_name)
- assert 'ID' in svc_info
- assert svc_info['ID'] == svc_id['ID']
-
- def test_remove_service_by_id(self):
- svc_name, svc_id = self.create_simple_service()
- assert self.client.remove_service(svc_id)
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 0
-
- def test_remove_service_by_name(self):
- svc_name, svc_id = self.create_simple_service()
- assert self.client.remove_service(svc_name)
- test_services = self.client.services(filters={'name': 'dockerpytest_'})
- assert len(test_services) == 0
-
- def test_create_service_simple(self):
- name, svc_id = self.create_simple_service()
- assert self.client.inspect_service(svc_id)
- services = self.client.services(filters={'name': name})
- assert len(services) == 1
- assert services[0]['ID'] == svc_id['ID']
-
- def test_create_service_custom_log_driver(self):
- container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
- )
- log_cfg = docker.types.DriverConfig('none')
- task_tmpl = docker.types.TaskTemplate(
- container_spec, log_driver=log_cfg
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'TaskTemplate' in svc_info['Spec']
- res_template = svc_info['Spec']['TaskTemplate']
- assert 'LogDriver' in res_template
- assert 'Name' in res_template['LogDriver']
- assert res_template['LogDriver']['Name'] == 'none'
-
- def test_create_service_with_volume_mount(self):
- vol_name = self.get_service_name()
- container_spec = docker.types.ContainerSpec(
- 'busybox', ['ls'],
- mounts=[
- docker.types.Mount(target='/test', source=vol_name)
- ]
- )
- self.tmp_volumes.append(vol_name)
- task_tmpl = docker.types.TaskTemplate(container_spec)
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
- cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
- assert 'Mounts' in cspec
- assert len(cspec['Mounts']) == 1
- mount = cspec['Mounts'][0]
- assert mount['Target'] == '/test'
- assert mount['Source'] == vol_name
- assert mount['Type'] == 'volume'
-
- def test_create_service_with_resources_constraints(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
- resources = docker.types.Resources(
- cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
- cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
- )
- task_tmpl = docker.types.TaskTemplate(
- container_spec, resources=resources
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'TaskTemplate' in svc_info['Spec']
- res_template = svc_info['Spec']['TaskTemplate']
- assert 'Resources' in res_template
- assert res_template['Resources']['Limits'] == resources['Limits']
- assert res_template['Resources']['Reservations'] == resources[
- 'Reservations'
- ]
-
- def test_create_service_with_update_config(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
- task_tmpl = docker.types.TaskTemplate(container_spec)
- update_config = docker.types.UpdateConfig(
- parallelism=10, delay=5, failure_action='pause'
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(
- task_tmpl, update_config=update_config, name=name
- )
- svc_info = self.client.inspect_service(svc_id)
- assert 'UpdateConfig' in svc_info['Spec']
- assert update_config == svc_info['Spec']['UpdateConfig']
-
- def test_create_service_with_restart_policy(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
- policy = docker.types.RestartPolicy(
- docker.types.RestartPolicy.condition_types.ANY,
- delay=5, max_attempts=5
- )
- task_tmpl = docker.types.TaskTemplate(
- container_spec, restart_policy=policy
- )
- name = self.get_service_name()
- svc_id = self.client.create_service(task_tmpl, name=name)
- svc_info = self.client.inspect_service(svc_id)
- assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
- assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
-
- def test_update_service_name(self):
- name, svc_id = self.create_simple_service()
- svc_info = self.client.inspect_service(svc_id)
- svc_version = svc_info['Version']['Index']
- new_name = self.get_service_name()
- assert self.client.update_service(
- svc_id, svc_version, name=new_name,
- task_template=svc_info['Spec']['TaskTemplate']
- )
- svc_info = self.client.inspect_service(svc_id)
- assert svc_info['Spec']['Name'] == new_name
diff --git a/tests/integration/testdata/dummy-plugin/config.json b/tests/integration/testdata/dummy-plugin/config.json
new file mode 100644
index 0000000..53b4e7a
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/config.json
@@ -0,0 +1,19 @@
+{
+ "description": "Dummy test plugin for docker python SDK",
+ "documentation": "https://github.com/docker/docker-py",
+ "entrypoint": ["/dummy"],
+ "network": {
+ "type": "host"
+ },
+ "interface" : {
+ "types": ["docker.volumedriver/1.0"],
+ "socket": "dummy.sock"
+ },
+ "env": [
+ {
+ "name":"DEBUG",
+ "settable":["value"],
+ "value":"0"
+ }
+ ]
+}
diff --git a/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
diff --git a/tests/unit/build_test.py b/tests/unit/api_build_test.py
index b2705eb..927aa97 100644
--- a/tests/unit/build_test.py
+++ b/tests/unit/api_build_test.py
@@ -4,14 +4,13 @@ import io
import docker
from docker import auth
-from .api_test import DockerClientTest, fake_request, url_prefix
+from .api_test import BaseAPIClientTest, fake_request, url_prefix
-class BuildTest(DockerClientTest):
+class BuildTest(BaseAPIClientTest):
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -23,7 +22,6 @@ class BuildTest(DockerClientTest):
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -35,7 +33,6 @@ class BuildTest(DockerClientTest):
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -47,7 +44,6 @@ class BuildTest(DockerClientTest):
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -60,7 +56,6 @@ class BuildTest(DockerClientTest):
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
diff --git a/tests/unit/container_test.py b/tests/unit/api_container_test.py
index db3dd74..3b135a8 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/api_container_test.py
@@ -9,9 +9,9 @@ import pytest
import six
from . import fake_api
-from ..base import requires_api_version
+from ..helpers import requires_api_version
from .api_test import (
- DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
fake_inspect_container
)
@@ -25,7 +25,7 @@ def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
-class StartContainerTest(DockerClientTest):
+class StartContainerTest(BaseAPIClientTest):
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
@@ -34,10 +34,7 @@ class StartContainerTest(DockerClientTest):
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
@@ -48,7 +45,7 @@ class StartContainerTest(DockerClientTest):
self.assertEqual(
str(excinfo.value),
- 'image or container param is undefined',
+ 'Resource ID was not provided',
)
with pytest.raises(ValueError) as excinfo:
@@ -56,32 +53,28 @@ class StartContainerTest(DockerClientTest):
self.assertEqual(
str(excinfo.value),
- 'image or container param is undefined',
+ 'Resource ID was not provided',
)
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_start_container_with_lxc_conf(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_lxc_conf_compat(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_ro(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
@@ -91,22 +84,18 @@ class StartContainerTest(DockerClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_rw(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_port_binds(self):
self.maxDiff = None
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
@@ -116,18 +105,14 @@ class StartContainerTest(DockerClientTest):
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_multiple_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
@@ -136,21 +121,15 @@ class StartContainerTest(DockerClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links_as_list_of_tuples(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
- pytest.deprecated_call(call_start)
-
def test_start_container_privileged(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
@@ -159,16 +138,13 @@ class StartContainerTest(DockerClientTest):
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
-class CreateContainerTest(DockerClientTest):
+class CreateContainerTest(BaseAPIClientTest):
def test_create_container(self):
self.client.create_container('busybox', 'true')
@@ -356,7 +332,34 @@ class CreateContainerTest(DockerClientTest):
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
- "CpuSetCpus": "0,1",
+ "CpusetCpus": "0,1",
+ "NetworkMode": "default"
+ }}'''))
+ self.assertEqual(args[1]['headers'],
+ {'Content-Type': 'application/json'})
+
+ @requires_api_version('1.19')
+ def test_create_container_with_host_config_cpuset_mems(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpuset_mems='0'
+ )
+ )
+
+ args = fake_request.call_args
+ self.assertEqual(args[0][1],
+ url_prefix + 'containers/create')
+
+ self.assertEqual(json.loads(args[1]['data']),
+ json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpusetMems": "0",
"NetworkMode": "default"
}}'''))
self.assertEqual(args[1]['headers'],
@@ -431,11 +434,8 @@ class CreateContainerTest(DockerClientTest):
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
- self.client.create_container('busybox', 'true', volumes_from=[])
-
- args = fake_request.call_args
- data = json.loads(args[1]['data'])
- self.assertTrue('VolumesFrom' not in data)
+ with pytest.raises(docker.errors.InvalidVersion):
+ self.client.create_container('busybox', 'true', volumes_from=[])
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
@@ -1002,11 +1002,11 @@ class CreateContainerTest(DockerClientTest):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
+ volume_driver='foodriver',
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
- volume_driver='foodriver',
)
args = fake_request.call_args
@@ -1014,8 +1014,8 @@ class CreateContainerTest(DockerClientTest):
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
- expected_payload['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig'] = self.client.create_host_config()
+ expected_payload['HostConfig']['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
@@ -1179,8 +1179,40 @@ class CreateContainerTest(DockerClientTest):
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
+ @requires_api_version('1.25')
+ def test_create_container_with_host_config_cpus(self):
+ self.client.create_container(
+ 'busybox', 'ls', host_config=self.client.create_host_config(
+ cpu_count=1,
+ cpu_percent=20,
+ nano_cpus=1000
+ )
+ )
+
+ args = fake_request.call_args
+ self.assertEqual(args[0][1],
+ url_prefix + 'containers/create')
-class ContainerTest(DockerClientTest):
+ self.assertEqual(json.loads(args[1]['data']),
+ json.loads('''
+ {"Tty": false, "Image": "busybox",
+ "Cmd": ["ls"], "AttachStdin": false,
+ "AttachStderr": true,
+ "AttachStdout": true, "OpenStdin": false,
+ "StdinOnce": false,
+ "NetworkDisabled": false,
+ "HostConfig": {
+ "CpuCount": 1,
+ "CpuPercent": 20,
+ "NanoCpus": 1000,
+ "NetworkMode": "default"
+ }}'''))
+ self.assertEqual(
+ args[1]['headers'], {'Content-Type': 'application/json'}
+ )
+
+
+class ContainerTest(BaseAPIClientTest):
def test_list_containers(self):
self.client.containers(all=True)
@@ -1244,7 +1276,7 @@ class ContainerTest(DockerClientTest):
)
def test_logs(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
@@ -1263,7 +1295,7 @@ class ContainerTest(DockerClientTest):
)
def test_logs_with_dict_instead_of_id(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
@@ -1282,7 +1314,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_streaming(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=False)
@@ -1297,7 +1329,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_following(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=True)
@@ -1312,7 +1344,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_following_backwards(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
@@ -1326,7 +1358,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_streaming_and_following(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=True)
@@ -1342,7 +1374,7 @@ class ContainerTest(DockerClientTest):
def test_log_tail(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, tail=10)
@@ -1358,7 +1390,7 @@ class ContainerTest(DockerClientTest):
def test_log_since(self):
ts = 809222400
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=ts)
@@ -1375,7 +1407,7 @@ class ContainerTest(DockerClientTest):
def test_log_since_with_datetime(self):
ts = 809222400
time = datetime.datetime.utcfromtimestamp(ts)
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=time)
@@ -1389,11 +1421,18 @@ class ContainerTest(DockerClientTest):
stream=False
)
+ def test_log_since_with_invalid_value_raises_error(self):
+ with mock.patch('docker.api.client.APIClient.inspect_container',
+ fake_inspect_container):
+ with self.assertRaises(docker.errors.InvalidArgument):
+ self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
+ follow=False, since=42.42)
+
def test_log_tty(self):
m = mock.Mock()
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container_tty):
- with mock.patch('docker.Client._stream_raw_result',
+ with mock.patch('docker.api.client.APIClient._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
follow=True, stream=True)
@@ -1583,7 +1622,7 @@ class ContainerTest(DockerClientTest):
self.client.inspect_container(arg)
self.assertEqual(
- excinfo.value.args[0], 'image or container param is undefined'
+ excinfo.value.args[0], 'Resource ID was not provided'
)
def test_container_stats(self):
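The log-test hunks above all retarget the same mock: docker.Client is gone, so the tests now patch the low-level docker.api.client.APIClient instead. A minimal sketch of the pattern, with a hypothetical fake_inspect_container standing in for the fake_api-based helper the real tests import:

    import docker

    try:
        from unittest import mock
    except ImportError:
        import mock


    def fake_inspect_container(self, container):
        # hypothetical stand-in for the tests' fake_api helper
        return {'Id': container, 'Config': {'Tty': False}}

    # Patching the class attribute makes every APIClient instance use the fake:
    with mock.patch('docker.api.client.APIClient.inspect_container',
                    fake_inspect_container):
        client = docker.APIClient()
        client.inspect_container('3cc2351ab11b')  # served by the fake
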
diff --git a/tests/unit/exec_test.py b/tests/unit/api_exec_test.py
index 6ba2a3d..41ee940 100644
--- a/tests/unit/exec_test.py
+++ b/tests/unit/api_exec_test.py
@@ -2,11 +2,11 @@ import json
from . import fake_api
from .api_test import (
- DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
)
-class ExecTest(DockerClientTest):
+class ExecTest(BaseAPIClientTest):
def test_exec_create(self):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
diff --git a/tests/unit/image_test.py b/tests/unit/api_image_test.py
index b2b1dd6..36b2a46 100644
--- a/tests/unit/image_test.py
+++ b/tests/unit/api_image_test.py
@@ -4,7 +4,7 @@ import pytest
from . import fake_api
from docker import auth
from .api_test import (
- DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
+ BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
fake_resolve_authconfig
)
@@ -14,7 +14,7 @@ except ImportError:
import mock
-class ImageTest(DockerClientTest):
+class ImageTest(BaseAPIClientTest):
def test_image_viz(self):
with pytest.raises(Exception):
self.client.images('busybox', viz=True)
@@ -204,7 +204,7 @@ class ImageTest(DockerClientTest):
self.client.inspect_image(arg)
self.assertEqual(
- excinfo.value.args[0], 'image or container param is undefined'
+ excinfo.value.args[0], 'Resource ID was not provided'
)
def test_insert_image(self):
@@ -228,7 +228,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
@@ -245,7 +245,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image_with_tag(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
@@ -271,9 +271,9 @@ class ImageTest(DockerClientTest):
}
encoded_auth = auth.encode_header(auth_config)
self.client.push(
- fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
- auth_config=auth_config
- )
+ fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
+ auth_config=auth_config
+ )
fake_request.assert_called_with(
'POST',
@@ -289,7 +289,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image_stream(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
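A note on the push tests above: resolve_authconfig now lives on the flattened docker.auth module, and passing an explicit auth_config skips config-file resolution entirely. A sketch of that second path, assuming a reachable daemon and registry; the credentials are placeholders:

    import docker

    client = docker.APIClient()
    auth_config = {'username': 'user', 'password': 'secret'}  # placeholders
    # An explicit auth_config bypasses docker.auth.resolve_authconfig():
    client.push('test_image', tag='latest', auth_config=auth_config)
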
diff --git a/tests/unit/network_test.py b/tests/unit/api_network_test.py
index 2521688..f997a1b 100644
--- a/tests/unit/network_test.py
+++ b/tests/unit/api_network_test.py
@@ -2,9 +2,9 @@ import json
import six
-from .. import base
-from .api_test import DockerClientTest, url_prefix, response
-from docker.utils import create_ipam_config, create_ipam_pool
+from .api_test import BaseAPIClientTest, url_prefix, response
+from ..helpers import requires_api_version
+from docker.types import IPAMConfig, IPAMPool
try:
from unittest import mock
@@ -12,8 +12,8 @@ except ImportError:
import mock
-class NetworkTest(DockerClientTest):
- @base.requires_api_version('1.21')
+class NetworkTest(BaseAPIClientTest):
+ @requires_api_version('1.21')
def test_list_networks(self):
networks = [
{
@@ -33,7 +33,7 @@ class NetworkTest(DockerClientTest):
get = mock.Mock(return_value=response(
status_code=200, content=json.dumps(networks).encode('utf-8')))
- with mock.patch('docker.Client.get', get):
+ with mock.patch('docker.api.client.APIClient.get', get):
self.assertEqual(self.client.networks(), networks)
self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
@@ -49,7 +49,7 @@ class NetworkTest(DockerClientTest):
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'id': ['123']})
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_create_network(self):
network_data = {
"id": 'abc12345',
@@ -59,7 +59,7 @@ class NetworkTest(DockerClientTest):
network_response = response(status_code=200, content=network_data)
post = mock.Mock(return_value=network_response)
- with mock.patch('docker.Client.post', post):
+ with mock.patch('docker.api.client.APIClient.post', post):
result = self.client.create_network('foo')
self.assertEqual(result, network_data)
@@ -81,9 +81,9 @@ class NetworkTest(DockerClientTest):
json.loads(post.call_args[1]['data']),
{"Name": "foo", "Driver": "bridge", "Options": opts})
- ipam_pool_config = create_ipam_pool(subnet="192.168.52.0/24",
- gateway="192.168.52.254")
- ipam_config = create_ipam_config(pool_configs=[ipam_pool_config])
+ ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
+ gateway="192.168.52.254")
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool_config])
self.client.create_network("bar", driver="bridge",
ipam=ipam_config)
@@ -100,23 +100,23 @@ class NetworkTest(DockerClientTest):
"Gateway": "192.168.52.254",
"Subnet": "192.168.52.0/24",
"AuxiliaryAddresses": None,
- }]
+ }],
}
})
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_remove_network(self):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
- with mock.patch('docker.Client.delete', delete):
+ with mock.patch('docker.api.client.APIClient.delete', delete):
self.client.remove_network(network_id)
args = delete.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
@@ -130,7 +130,7 @@ class NetworkTest(DockerClientTest):
network_response = response(status_code=200, content=network_data)
get = mock.Mock(return_value=network_response)
- with mock.patch('docker.Client.get', get):
+ with mock.patch('docker.api.client.APIClient.get', get):
result = self.client.inspect_network(network_id)
self.assertEqual(result, network_data)
@@ -138,14 +138,14 @@ class NetworkTest(DockerClientTest):
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_connect_container_to_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
- with mock.patch('docker.Client.post', post):
+ with mock.patch('docker.api.client.APIClient.post', post):
self.client.connect_container_to_network(
{'Id': container_id},
network_id,
@@ -167,14 +167,14 @@ class NetworkTest(DockerClientTest):
},
})
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_disconnect_container_from_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
- with mock.patch('docker.Client.post', post):
+ with mock.patch('docker.api.client.APIClient.post', post):
self.client.disconnect_container_from_network(
{'Id': container_id}, network_id)
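The same migration shows up in the IPAM hunks: the create_ipam_pool/create_ipam_config helpers give way to the dict-subclass types in docker.types. A sketch of the equivalent construction, with the subnet and gateway values taken from the test:

    from docker.types import IPAMConfig, IPAMPool

    pool = IPAMPool(subnet='192.168.52.0/24', gateway='192.168.52.254')
    ipam = IPAMConfig(pool_configs=[pool])
    # client.create_network('bar', driver='bridge', ipam=ipam)
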
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index 8faca6b..83848c5 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -1,21 +1,21 @@
import datetime
import json
+import io
import os
import re
import shutil
import socket
-import sys
import tempfile
import threading
import time
-import io
+import unittest
import docker
+from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
-from .. import base
from . import fake_api
import pytest
@@ -86,21 +86,25 @@ def fake_delete(self, url, *args, **kwargs):
def fake_read_from_socket(self, response, stream):
return six.binary_type()
+
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
-class DockerClientTest(base.Cleanup, base.BaseTestCase):
+class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
- 'docker.Client', get=fake_get, post=fake_post, put=fake_put,
+ 'docker.api.client.APIClient',
+ get=fake_get,
+ post=fake_post,
+ put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
- self.client = docker.Client()
+ self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
@@ -108,11 +112,6 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
self.client.close()
self.patcher.stop()
- def assertIn(self, object, collection):
- if six.PY2 and sys.version_info[1] <= 6:
- return self.assertTrue(object in collection)
- return super(DockerClientTest, self).assertIn(object, collection)
-
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
@@ -124,10 +123,10 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
}
-class DockerApiTest(DockerClientTest):
+class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
- docker.Client(version=1.12)
+ APIClient(version=1.12)
self.assertEqual(
str(excinfo.value),
@@ -194,7 +193,7 @@ class DockerApiTest(DockerClientTest):
)
def test_retrieve_server_version(self):
- client = docker.Client(version="auto")
+ client = APIClient(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
@@ -229,7 +228,8 @@ class DockerApiTest(DockerClientTest):
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
- stream=True
+ stream=True,
+ timeout=None
)
def test_events_with_since_until(self):
@@ -248,7 +248,8 @@ class DockerApiTest(DockerClientTest):
'until': ts + 10,
'filters': None
},
- stream=True
+ stream=True,
+ timeout=None
)
def test_events_with_filters(self):
@@ -266,7 +267,8 @@ class DockerApiTest(DockerClientTest):
'until': None,
'filters': expected_filters
},
- stream=True
+ stream=True,
+ timeout=None
)
def _socket_path_for_client_session(self, client):
@@ -274,27 +276,27 @@ class DockerApiTest(DockerClientTest):
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
- c = docker.Client(base_url="unix://socket")
+ c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
- c = docker.Client(base_url="unix:///socket")
+ c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
- c = docker.Client(base_url="http+unix:///socket")
+ c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
- c = docker.Client(base_url="http://hostname:1234")
+ c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
- c = docker.Client(base_url="tcp://hostname:1234")
+ c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
@@ -327,7 +329,7 @@ class DockerApiTest(DockerClientTest):
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
- setattr(raw_resp._fp, 'chunk_left', len(body.getvalue())-1)
+ setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
@@ -355,7 +357,7 @@ class DockerApiTest(DockerClientTest):
self.assertEqual(result, content)
-class StreamTest(base.Cleanup, base.BaseTestCase):
+class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
@@ -440,7 +442,7 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
b'\r\n'
) + b'\r\n'.join(lines)
- with docker.Client(base_url="http+unix://" + self.socket_file) \
+ with APIClient(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
@@ -457,10 +459,10 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
str(i).encode() for i in range(50)])
-class UserAgentTest(base.BaseTestCase):
+class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
- docker.Client,
+ APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
@@ -470,18 +472,62 @@ class UserAgentTest(base.BaseTestCase):
self.patcher.stop()
def test_default_user_agent(self):
- client = docker.Client()
+ client = APIClient()
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
- expected = 'docker-py/%s' % docker.__version__
+ expected = 'docker-sdk-python/%s' % docker.__version__
self.assertEqual(headers['User-Agent'], expected)
def test_custom_user_agent(self):
- client = docker.Client(user_agent='foo/bar')
+ client = APIClient(user_agent='foo/bar')
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
self.assertEqual(headers['User-Agent'], 'foo/bar')
+
+
+class DisableSocketTest(unittest.TestCase):
+ class DummySocket(object):
+ def __init__(self, timeout=60):
+ self.timeout = timeout
+
+ def settimeout(self, timeout):
+ self.timeout = timeout
+
+ def gettimeout(self):
+ return self.timeout
+
+ def setUp(self):
+ self.client = APIClient()
+
+ def test_disable_socket_timeout(self):
+ """Test that the timeout is disabled on a generic socket object."""
+ socket = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ self.assertEqual(socket.timeout, None)
+
+ def test_disable_socket_timeout2(self):
+ """Test that the timeouts are disabled on a generic socket object
+ and its _sock object if present."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ self.assertEqual(socket.timeout, None)
+ self.assertEqual(socket._sock.timeout, None)
+
+ def test_disable_socket_timeout_non_blocking(self):
+ """Test that a non-blocking socket does not get set to blocking."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket(0.0)
+
+ self.client._disable_socket_timeout(socket)
+
+ self.assertEqual(socket.timeout, None)
+ self.assertEqual(socket._sock.timeout, 0.0)
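The relocated DisableSocketTest pins down the contract of the private _disable_socket_timeout helper. The sketch below restates that contract as the tests describe it; it is an assumption about intent, not the library's actual implementation:

    def disable_socket_timeout(sock):
        # Clear the timeout on the socket and on a wrapped _sock, if any,
        # but leave a non-blocking socket (timeout 0.0) untouched so it is
        # not silently switched back to blocking mode.
        for s in (sock, getattr(sock, '_sock', None)):
            if s is not None and s.gettimeout() != 0.0:
                s.settimeout(None)
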
diff --git a/tests/unit/volume_test.py b/tests/unit/api_volume_test.py
index 136d11a..fc2a556 100644
--- a/tests/unit/volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -2,12 +2,12 @@ import json
import pytest
-from .. import base
-from .api_test import DockerClientTest, url_prefix, fake_request
+from ..helpers import requires_api_version
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
-class VolumeTest(DockerClientTest):
- @base.requires_api_version('1.21')
+class VolumeTest(BaseAPIClientTest):
+ @requires_api_version('1.21')
def test_list_volumes(self):
volumes = self.client.volumes()
self.assertIn('Volumes', volumes)
@@ -17,7 +17,7 @@ class VolumeTest(DockerClientTest):
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], url_prefix + 'volumes')
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_list_volumes_and_filters(self):
volumes = self.client.volumes(filters={'dangling': True})
assert 'Volumes' in volumes
@@ -29,7 +29,7 @@ class VolumeTest(DockerClientTest):
assert args[1] == {'params': {'filters': '{"dangling": ["true"]}'},
'timeout': 60}
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_create_volume(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name)
@@ -43,7 +43,7 @@ class VolumeTest(DockerClientTest):
self.assertEqual(args[0][1], url_prefix + 'volumes/create')
self.assertEqual(json.loads(args[1]['data']), {'Name': name})
- @base.requires_api_version('1.23')
+ @requires_api_version('1.23')
def test_create_volume_with_labels(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name, labels={
@@ -53,13 +53,13 @@ class VolumeTest(DockerClientTest):
{'com.example.some-label': 'some-value'}
)
- @base.requires_api_version('1.23')
+ @requires_api_version('1.23')
def test_create_volume_with_invalid_labels(self):
name = 'perfectcherryblossom'
with pytest.raises(TypeError):
self.client.create_volume(name, labels=1)
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'
driver_name = 'sshfs'
@@ -72,7 +72,7 @@ class VolumeTest(DockerClientTest):
self.assertIn('Driver', data)
self.assertEqual(data['Driver'], driver_name)
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_create_volume_invalid_opts_type(self):
with pytest.raises(TypeError):
self.client.create_volume(
@@ -89,7 +89,17 @@ class VolumeTest(DockerClientTest):
'perfectcherryblossom', driver_opts=''
)
- @base.requires_api_version('1.21')
+ @requires_api_version('1.24')
+ def test_create_volume_with_no_specified_name(self):
+ result = self.client.create_volume(name=None)
+ self.assertIn('Name', result)
+ self.assertNotEqual(result['Name'], None)
+ self.assertIn('Driver', result)
+ self.assertEqual(result['Driver'], 'local')
+ self.assertIn('Scope', result)
+ self.assertEqual(result['Scope'], 'local')
+
+ @requires_api_version('1.21')
def test_inspect_volume(self):
name = 'perfectcherryblossom'
result = self.client.inspect_volume(name)
@@ -102,7 +112,7 @@ class VolumeTest(DockerClientTest):
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
- @base.requires_api_version('1.21')
+ @requires_api_version('1.21')
def test_remove_volume(self):
name = 'perfectcherryblossom'
self.client.remove_volume(name)
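The new 1.24 test above also documents that the volume name may be omitted, in which case the engine generates one. A sketch, assuming a reachable daemon with the default local driver:

    import docker

    client = docker.APIClient()
    vol = client.create_volume(name=None)  # engine auto-generates a name
    assert vol['Name'] and vol['Driver'] == 'local' and vol['Scope'] == 'local'
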
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index f395133..56fd50c 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -7,12 +7,12 @@ import os.path
import random
import shutil
import tempfile
+import unittest
-from docker import auth
-from docker.auth.auth import parse_auth
-from docker import errors
+from py.test import ensuretemp
+from pytest import mark
-from .. import base
+from docker import auth, errors
try:
from unittest import mock
@@ -20,7 +20,7 @@ except ImportError:
import mock
-class RegressionTest(base.BaseTestCase):
+class RegressionTest(unittest.TestCase):
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
@@ -31,7 +31,7 @@ class RegressionTest(base.BaseTestCase):
assert b'_' in encoded
-class ResolveRepositoryNameTest(base.BaseTestCase):
+class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image'),
@@ -117,12 +117,12 @@ def encode_auth(auth_info):
auth_info.get('password', '').encode('utf-8'))
-class ResolveAuthTest(base.BaseTestCase):
+class ResolveAuthTest(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
- auth_config = parse_auth({
+ auth_config = auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
@@ -272,7 +272,108 @@ class ResolveAuthTest(base.BaseTestCase):
)
-class LoadConfigTest(base.Cleanup, base.BaseTestCase):
+class CredStoreTest(unittest.TestCase):
+ def test_get_credential_store(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ }
+
+ assert auth.get_credential_store(
+ auth_config, 'registry1.io'
+ ) == 'truesecret'
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) == 'blackbox'
+
+ def test_get_credential_store_no_default(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ }
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) is None
+
+ def test_get_credential_store_default_index(self):
+ auth_config = {
+ 'credHelpers': {
+ 'https://index.docker.io/v1/': 'powerlock'
+ },
+ 'credsStore': 'truesecret'
+ }
+
+ assert auth.get_credential_store(auth_config, None) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'docker.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'images.io'
+ ) == 'truesecret'
+
+
+class FindConfigFileTest(unittest.TestCase):
+ def tmpdir(self, name):
+ tmpdir = ensuretemp(name)
+ self.addCleanup(tmpdir.remove)
+ return tmpdir
+
+ def test_find_config_fallback(self):
+ tmpdir = self.tmpdir('test_find_config_fallback')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert auth.find_config_file() is None
+
+ def test_find_config_from_explicit_path(self):
+ tmpdir = self.tmpdir('test_find_config_from_explicit_path')
+ config_path = tmpdir.ensure('my-config-file.json')
+
+ assert auth.find_config_file(str(config_path)) == str(config_path)
+
+ def test_find_config_from_environment(self):
+ tmpdir = self.tmpdir('test_find_config_from_environment')
+ config_path = tmpdir.ensure('config.json')
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
+ assert auth.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_posix(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_posix')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert auth.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_legacy_name(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
+ config_path = tmpdir.ensure('.dockercfg')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert auth.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform != 'win32'")
+ def test_find_config_from_home_windows(self):
+ tmpdir = self.tmpdir('test_find_config_from_home_windows')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
+ assert auth.find_config_file() == str(config_path)
+
+
+class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
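The new CredStoreTest fixes the lookup order for credential helpers: a per-registry credHelpers entry wins, and the global credsStore is only the fallback. A minimal sketch using the same config shape (the helper names are the test's placeholders):

    from docker import auth

    config = {
        'credHelpers': {'registry1.io': 'truesecret'},
        'credsStore': 'blackbox',
    }
    assert auth.get_credential_store(config, 'registry1.io') == 'truesecret'
    assert auth.get_credential_store(config, 'other.io') == 'blackbox'
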
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index 6ceb8cb..c4996f1 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -1,14 +1,81 @@
+import datetime
+import docker
+from docker.utils import kwargs_from_env
+from docker.constants import (
+ DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
+)
import os
-from docker.client import Client
-from .. import base
+import unittest
-TEST_CERT_DIR = os.path.join(
- os.path.dirname(__file__),
- 'testdata/certs',
-)
+from . import fake_api
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
+
+
+class ClientTest(unittest.TestCase):
+
+ @mock.patch('docker.api.APIClient.events')
+ def test_events(self, mock_func):
+ since = datetime.datetime(2016, 1, 1, 0, 0)
+ mock_func.return_value = fake_api.get_fake_events()[1]
+ client = docker.from_env()
+ assert client.events(since=since) == mock_func.return_value
+ mock_func.assert_called_with(since=since)
+
+ @mock.patch('docker.api.APIClient.info')
+ def test_info(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_info()[1]
+ client = docker.from_env()
+ assert client.info() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.ping')
+ def test_ping(self, mock_func):
+ mock_func.return_value = True
+ client = docker.from_env()
+ assert client.ping() is True
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.version')
+ def test_version(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_version()[1]
+ client = docker.from_env()
+ assert client.version() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ def test_call_api_client_method(self):
+ client = docker.from_env()
+ with self.assertRaises(AttributeError) as cm:
+ client.create_container()
+ s = str(cm.exception)
+ assert "'DockerClient' object has no attribute 'create_container'" in s
+ assert "this method is now on the object APIClient" in s
+
+ with self.assertRaises(AttributeError) as cm:
+ client.abcdef()
+ s = str(cm.exception)
+ assert "'DockerClient' object has no attribute 'abcdef'" in s
+ assert "this method is now on the object APIClient" not in s
+
+ def test_call_containers(self):
+ client = docker.DockerClient(**kwargs_from_env())
+ with self.assertRaises(TypeError) as cm:
+ client.containers()
+
+ s = str(cm.exception)
+ assert "'ContainerCollection' object is not callable" in s
+ assert "docker.APIClient" in s
+
+
+class FromEnvTest(unittest.TestCase):
-class ClientTest(base.BaseTestCase):
def setUp(self):
self.os_environ = os.environ.copy()
@@ -22,57 +89,23 @@ class ClientTest(base.BaseTestCase):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = Client.from_env()
- self.assertEqual(client.base_url, "https://192.168.59.103:2376")
+ client = docker.from_env()
+ self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
def test_from_env_with_version(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = Client.from_env(version='2.32')
- self.assertEqual(client.base_url, "https://192.168.59.103:2376")
- self.assertEqual(client._version, '2.32')
-
-
-class DisableSocketTest(base.BaseTestCase):
- class DummySocket(object):
- def __init__(self, timeout=60):
- self.timeout = timeout
-
- def settimeout(self, timeout):
- self.timeout = timeout
-
- def gettimeout(self):
- return self.timeout
-
- def setUp(self):
- self.client = Client()
-
- def test_disable_socket_timeout(self):
- """Test that the timeout is disabled on a generic socket object."""
- socket = self.DummySocket()
-
- self.client._disable_socket_timeout(socket)
-
- self.assertEqual(socket.timeout, None)
-
- def test_disable_socket_timeout2(self):
- """Test that the timeouts are disabled on a generic socket object
- and it's _sock object if present."""
- socket = self.DummySocket()
- socket._sock = self.DummySocket()
-
- self.client._disable_socket_timeout(socket)
+ client = docker.from_env(version='2.32')
+ self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
+ self.assertEqual(client.api._version, '2.32')
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, None)
+ def test_from_env_without_version_uses_default(self):
+ client = docker.from_env()
- def test_disable_socket_timout_non_blocking(self):
- """Test that a non-blocking socket does not get set to blocking."""
- socket = self.DummySocket()
- socket._sock = self.DummySocket(0.0)
+ self.assertEqual(client.api._version, DEFAULT_DOCKER_API_VERSION)
- self.client._disable_socket_timeout(socket)
+ def test_from_env_without_timeout_uses_default(self):
+ client = docker.from_env()
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, 0.0)
+ self.assertEqual(client.api.timeout, DEFAULT_TIMEOUT_SECONDS)
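Taken together, the rewritten client tests pin down the 2.0 split: docker.from_env() returns the high-level DockerClient, and the raw REST surface lives on its .api attribute. A sketch:

    import docker

    client = docker.from_env()      # honours DOCKER_HOST, DOCKER_CERT_PATH, ...
    print(client.api.base_url)      # the underlying APIClient's settings
    # client.create_container(...)  # AttributeError: method moved to APIClient
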
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
new file mode 100644
index 0000000..8dbb35e
--- /dev/null
+++ b/tests/unit/dockertypes_test.py
@@ -0,0 +1,415 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+import warnings
+
+import pytest
+
+from docker.constants import DEFAULT_DOCKER_API_VERSION
+from docker.errors import InvalidArgument, InvalidVersion
+from docker.types import (
+ ContainerConfig, ContainerSpec, EndpointConfig, HostConfig, IPAMConfig,
+ IPAMPool, LogConfig, Mount, ServiceMode, Ulimit,
+)
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+def create_host_config(*args, **kwargs):
+ return HostConfig(*args, **kwargs)
+
+
+class HostConfigTest(unittest.TestCase):
+ def test_create_host_config_no_options(self):
+ config = create_host_config(version='1.19')
+ self.assertFalse('NetworkMode' in config)
+
+ def test_create_host_config_no_options_newer_api_version(self):
+ config = create_host_config(version='1.20')
+ self.assertEqual(config['NetworkMode'], 'default')
+
+ def test_create_host_config_invalid_cpu_cfs_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_quota='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_period='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_quota=23.11)
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_period=1999.0)
+
+ def test_create_host_config_with_cpu_quota(self):
+ config = create_host_config(version='1.20', cpu_quota=1999)
+ self.assertEqual(config.get('CpuQuota'), 1999)
+
+ def test_create_host_config_with_cpu_period(self):
+ config = create_host_config(version='1.20', cpu_period=1999)
+ self.assertEqual(config.get('CpuPeriod'), 1999)
+
+ def test_create_host_config_with_blkio_constraints(self):
+ blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
+ config = create_host_config(version='1.22',
+ blkio_weight=1999,
+ blkio_weight_device=blkio_rate,
+ device_read_bps=blkio_rate,
+ device_write_bps=blkio_rate,
+ device_read_iops=blkio_rate,
+ device_write_iops=blkio_rate)
+
+ self.assertEqual(config.get('BlkioWeight'), 1999)
+ self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
+ self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
+ self.assertEqual(blkio_rate[0]['Rate'], 1000)
+
+ def test_create_host_config_with_shm_size(self):
+ config = create_host_config(version='1.22', shm_size=67108864)
+ self.assertEqual(config.get('ShmSize'), 67108864)
+
+ def test_create_host_config_with_shm_size_in_mb(self):
+ config = create_host_config(version='1.22', shm_size='64M')
+ self.assertEqual(config.get('ShmSize'), 67108864)
+
+ def test_create_host_config_with_oom_kill_disable(self):
+ config = create_host_config(version='1.20', oom_kill_disable=True)
+ self.assertEqual(config.get('OomKillDisable'), True)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.18.3',
+ oom_kill_disable=True))
+
+ def test_create_host_config_with_userns_mode(self):
+ config = create_host_config(version='1.23', userns_mode='host')
+ self.assertEqual(config.get('UsernsMode'), 'host')
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.22',
+ userns_mode='host'))
+ self.assertRaises(
+ ValueError, lambda: create_host_config(version='1.23',
+ userns_mode='host12'))
+
+ def test_create_host_config_with_oom_score_adj(self):
+ config = create_host_config(version='1.22', oom_score_adj=100)
+ self.assertEqual(config.get('OomScoreAdj'), 100)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.21',
+ oom_score_adj=100))
+ self.assertRaises(
+ TypeError, lambda: create_host_config(version='1.22',
+ oom_score_adj='100'))
+
+ def test_create_host_config_with_dns_opt(self):
+
+ tested_opts = ['use-vc', 'no-tld-query']
+ config = create_host_config(version='1.21', dns_opt=tested_opts)
+ dns_opts = config.get('DnsOptions')
+
+ self.assertTrue('use-vc' in dns_opts)
+ self.assertTrue('no-tld-query' in dns_opts)
+
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.20',
+ dns_opt=tested_opts))
+
+ def test_create_host_config_with_mem_reservation(self):
+ config = create_host_config(version='1.21', mem_reservation=67108864)
+ self.assertEqual(config.get('MemoryReservation'), 67108864)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.20', mem_reservation=67108864))
+
+ def test_create_host_config_with_kernel_memory(self):
+ config = create_host_config(version='1.21', kernel_memory=67108864)
+ self.assertEqual(config.get('KernelMemory'), 67108864)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.20', kernel_memory=67108864))
+
+ def test_create_host_config_with_pids_limit(self):
+ config = create_host_config(version='1.23', pids_limit=1024)
+ self.assertEqual(config.get('PidsLimit'), 1024)
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', pids_limit=1024)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.23', pids_limit='1024')
+
+ def test_create_host_config_with_isolation(self):
+ config = create_host_config(version='1.24', isolation='hyperv')
+ self.assertEqual(config.get('Isolation'), 'hyperv')
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.23', isolation='hyperv')
+ with pytest.raises(TypeError):
+ create_host_config(
+ version='1.24', isolation={'isolation': 'hyperv'}
+ )
+
+ def test_create_host_config_pid_mode(self):
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', pid_mode='baccab125')
+
+ config = create_host_config(version='1.23', pid_mode='host')
+ assert config.get('PidMode') == 'host'
+ config = create_host_config(version='1.24', pid_mode='baccab125')
+ assert config.get('PidMode') == 'baccab125'
+
+ def test_create_host_config_invalid_mem_swappiness(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.24', mem_swappiness='40')
+
+ def test_create_host_config_with_volume_driver(self):
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.20', volume_driver='local')
+
+ config = create_host_config(version='1.21', volume_driver='local')
+ assert config.get('VolumeDriver') == 'local'
+
+ def test_create_host_config_invalid_cpu_count_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_count='1')
+
+ def test_create_host_config_with_cpu_count(self):
+ config = create_host_config(version='1.25', cpu_count=2)
+ self.assertEqual(config.get('CpuCount'), 2)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.24', cpu_count=1))
+
+ def test_create_host_config_invalid_cpu_percent_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', cpu_percent='1')
+
+ def test_create_host_config_with_cpu_percent(self):
+ config = create_host_config(version='1.25', cpu_percent=15)
+ self.assertEqual(config.get('CpuPercent'), 15)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.24', cpu_percent=10))
+
+ def test_create_host_config_invalid_nano_cpus_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.25', nano_cpus='0')
+
+ def test_create_host_config_with_nano_cpus(self):
+ config = create_host_config(version='1.25', nano_cpus=1000)
+ self.assertEqual(config.get('NanoCpus'), 1000)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.24', nano_cpus=1))
+
+
+class ContainerConfigTest(unittest.TestCase):
+ def test_create_container_config_volume_driver_warning(self):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ ContainerConfig(
+ version='1.21', image='scratch', command=None,
+ volume_driver='local'
+ )
+
+ assert len(w) == 1
+ assert 'The volume_driver option has been moved' in str(w[0].message)
+
+
+class ContainerSpecTest(unittest.TestCase):
+ def test_parse_mounts(self):
+ spec = ContainerSpec(
+ image='scratch', mounts=[
+ '/local:/container',
+ '/local2:/container2:ro',
+ Mount(target='/target', source='/source')
+ ]
+ )
+
+ assert 'Mounts' in spec
+ assert len(spec['Mounts']) == 3
+ for mount in spec['Mounts']:
+ assert isinstance(mount, Mount)
+
+
+class UlimitTest(unittest.TestCase):
+ def test_create_host_config_dict_ulimit(self):
+ ulimit_dct = {'name': 'nofile', 'soft': 8096}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
+ self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
+ self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+
+ def test_create_host_config_dict_ulimit_capitals(self):
+ ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
+ self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
+ self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
+ self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+
+ def test_create_host_config_obj_ulimit(self):
+ ulimit_dct = Ulimit(name='nofile', soft=8096)
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj, ulimit_dct)
+
+ def test_ulimit_invalid_type(self):
+ self.assertRaises(ValueError, lambda: Ulimit(name=None))
+ self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
+ self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
+
+
+class LogConfigTest(unittest.TestCase):
+ def test_create_host_config_dict_logconfig(self):
+ dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=dct
+ )
+ self.assertIn('LogConfig', config)
+ self.assertTrue(isinstance(config['LogConfig'], LogConfig))
+ self.assertEqual(dct['type'], config['LogConfig'].type)
+
+ def test_create_host_config_obj_logconfig(self):
+ obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=obj
+ )
+ self.assertIn('LogConfig', config)
+ self.assertTrue(isinstance(config['LogConfig'], LogConfig))
+ self.assertEqual(obj, config['LogConfig'])
+
+ def test_logconfig_invalid_config_type(self):
+ with pytest.raises(ValueError):
+ LogConfig(type=LogConfig.types.JSON, config='helloworld')
+
+
+class EndpointConfigTest(unittest.TestCase):
+ def test_create_endpoint_config_with_aliases(self):
+ config = EndpointConfig(version='1.22', aliases=['foo', 'bar'])
+ assert config == {'Aliases': ['foo', 'bar']}
+
+ with pytest.raises(InvalidVersion):
+ EndpointConfig(version='1.21', aliases=['foo', 'bar'])
+
+
+class IPAMConfigTest(unittest.TestCase):
+ def test_create_ipam_config(self):
+ ipam_pool = IPAMPool(subnet='192.168.52.0/24',
+ gateway='192.168.52.254')
+
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool])
+ self.assertEqual(ipam_config, {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '192.168.52.0/24',
+ 'Gateway': '192.168.52.254',
+ 'AuxiliaryAddresses': None,
+ 'IPRange': None,
+ }]
+ })
+
+
+class ServiceModeTest(unittest.TestCase):
+ def test_replicated_simple(self):
+ mode = ServiceMode('replicated')
+ assert mode == {'replicated': {}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas is None
+
+ def test_global_simple(self):
+ mode = ServiceMode('global')
+ assert mode == {'global': {}}
+ assert mode.mode == 'global'
+ assert mode.replicas is None
+
+ def test_global_replicas_error(self):
+ with pytest.raises(InvalidArgument):
+ ServiceMode('global', 21)
+
+ def test_replicated_replicas(self):
+ mode = ServiceMode('replicated', 21)
+ assert mode == {'replicated': {'Replicas': 21}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 21
+
+ def test_replicated_replicas_0(self):
+ mode = ServiceMode('replicated', 0)
+ assert mode == {'replicated': {'Replicas': 0}}
+ assert mode.mode == 'replicated'
+ assert mode.replicas == 0
+
+ def test_invalid_mode(self):
+ with pytest.raises(InvalidArgument):
+ ServiceMode('foobar')
+
+
+class MountTest(unittest.TestCase):
+ def test_parse_mount_string_ro(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz:ro")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['ReadOnly'] is True
+
+ def test_parse_mount_string_rw(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz:rw")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_short_form(self):
+ mount = Mount.parse_mount_string("/foo/bar:/baz")
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_no_source(self):
+ mount = Mount.parse_mount_string("foo/bar")
+ assert mount['Source'] is None
+ assert mount['Target'] == "foo/bar"
+ assert not mount['ReadOnly']
+
+ def test_parse_mount_string_invalid(self):
+ with pytest.raises(InvalidArgument):
+ Mount.parse_mount_string("foo:bar:baz:rw")
+
+ def test_parse_mount_named_volume(self):
+ mount = Mount.parse_mount_string("foobar:/baz")
+ assert mount['Source'] == 'foobar'
+ assert mount['Target'] == '/baz'
+ assert mount['Type'] == 'volume'
+
+ def test_parse_mount_bind(self):
+ mount = Mount.parse_mount_string('/foo/bar:/baz')
+ assert mount['Source'] == "/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['Type'] == 'bind'
+
+ @pytest.mark.xfail
+ def test_parse_mount_bind_windows(self):
+ with mock.patch('docker.types.services.IS_WINDOWS_PLATFORM', True):
+ mount = Mount.parse_mount_string('C:/foo/bar:/baz')
+ assert mount['Source'] == "C:/foo/bar"
+ assert mount['Target'] == "/baz"
+ assert mount['Type'] == 'bind'
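MountTest encodes the short-form parsing rules: a path-like source becomes a bind mount, a bare name becomes a named volume, and a trailing ':ro' sets ReadOnly. A self-contained sketch:

    from docker.types import Mount

    m = Mount.parse_mount_string('/foo/bar:/baz:ro')
    assert m['Type'] == 'bind' and m['ReadOnly'] is True

    m = Mount.parse_mount_string('foobar:/baz')
    assert m['Type'] == 'volume' and m['Source'] == 'foobar'
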
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
new file mode 100644
index 0000000..b78af4e
--- /dev/null
+++ b/tests/unit/errors_test.py
@@ -0,0 +1,87 @@
+import unittest
+
+import requests
+
+from docker.errors import (APIError, DockerException,
+ create_unexpected_kwargs_error)
+
+
+class APIErrorTest(unittest.TestCase):
+ def test_api_error_is_caught_by_dockerexception(self):
+ try:
+ raise APIError("this should be caught by DockerException")
+ except DockerException:
+ pass
+
+ def test_status_code_200(self):
+ """The status_code property is present with 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.status_code == 200
+
+ def test_status_code_400(self):
+ """The status_code property is present with 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.status_code == 400
+
+ def test_status_code_500(self):
+ """The status_code property is present with 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.status_code == 500
+
+ def test_is_server_error_200(self):
+ """Report not server error on 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_300(self):
+ """Report not server error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_400(self):
+ """Report not server error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_500(self):
+ """Report server error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_server_error() is True
+
+ def test_is_client_error_500(self):
+ """Report not client error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_client_error() is False
+
+ def test_is_client_error_400(self):
+ """Report client error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_client_error() is True
+
+
+class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
+ def test_create_unexpected_kwargs_error_single(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
+ assert str(e) == "f() got an unexpected keyword argument 'foo'"
+
+ def test_create_unexpected_kwargs_error_multiple(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
+ assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index 65a8c42..ff0f1b6 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -6,6 +6,7 @@ CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
+FAKE_NETWORK_ID = '33fb6a3462b8'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
@@ -14,6 +15,7 @@ FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
+FAKE_NODE_ID = '24ifsmvkjbyhk'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
@@ -45,6 +47,17 @@ def get_fake_info():
return status_code, response
+def post_fake_auth():
+ status_code = 200
+ response = {'Status': 'Login Succeeded',
+ 'IdentityToken': '9cbaf023786cd7'}
+ return status_code, response
+
+
+def get_fake_ping():
+ return 200, "OK"
+
+
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
@@ -121,10 +134,12 @@ def get_fake_inspect_container(tty=False):
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
- 'Config': {'Privileged': True, 'Tty': tty},
+ 'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
+ 'Name': 'foobar',
"State": {
+ "Status": "running",
"Running": True,
"Pid": 0,
"ExitCode": 0,
@@ -139,11 +154,12 @@ def get_fake_inspect_container(tty=False):
def get_fake_inspect_image():
status_code = 200
response = {
- 'id': FAKE_IMAGE_ID,
- 'parent': "27cf784147099545",
- 'created': "2013-03-23T22:24:18.818426-07:00",
- 'container': FAKE_CONTAINER_ID,
- 'container_config':
+ 'Id': FAKE_IMAGE_ID,
+ 'Parent': "27cf784147099545",
+ 'Created': "2013-03-23T22:24:18.818426-07:00",
+ 'Container': FAKE_CONTAINER_ID,
+ 'Config': {'Labels': {'bar': 'foo'}},
+ 'ContainerConfig':
{
"Hostname": "",
"User": "",
@@ -374,11 +390,13 @@ def get_fake_volume_list():
{
'Name': 'perfectcherryblossom',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
+ 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
+ 'Scope': 'local'
}, {
'Name': 'subterraneananimism',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism'
+ 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism',
+ 'Scope': 'local'
}
]
}
@@ -393,7 +411,8 @@ def get_fake_volume():
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
'Labels': {
'com.example.some-label': 'some-value'
- }
+ },
+ 'Scope': 'local'
}
return status_code, response
@@ -406,6 +425,65 @@ def post_fake_update_container():
return 200, {'Warnings': []}
+def post_fake_update_node():
+ return 200, None
+
+
+def get_fake_network_list():
+ return 200, [{
+ "Name": "bridge",
+ "Id": FAKE_NETWORK_ID,
+ "Scope": "local",
+ "Driver": "bridge",
+ "EnableIPv6": False,
+ "Internal": False,
+ "IPAM": {
+ "Driver": "default",
+ "Config": [
+ {
+ "Subnet": "172.17.0.0/16"
+ }
+ ]
+ },
+ "Containers": {
+ FAKE_CONTAINER_ID: {
+ "EndpointID": "ed2419a97c1d99",
+ "MacAddress": "02:42:ac:11:00:02",
+ "IPv4Address": "172.17.0.2/16",
+ "IPv6Address": ""
+ }
+ },
+ "Options": {
+ "com.docker.network.bridge.default_bridge": "true",
+ "com.docker.network.bridge.enable_icc": "true",
+ "com.docker.network.bridge.enable_ip_masquerade": "true",
+ "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+ "com.docker.network.bridge.name": "docker0",
+ "com.docker.network.driver.mtu": "1500"
+ }
+ }]
+
+
+def get_fake_network():
+ return 200, get_fake_network_list()[1][0]
+
+
+def post_fake_network():
+ return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
+
+
+def delete_fake_network():
+ return 204, None
+
+
+def post_fake_network_connect():
+ return 200, None
+
+
+def post_fake_network_disconnect():
+ return 200, None
+
+
# Maps real api url to fake response callback
prefix = 'http+docker://localunixsocket'
if constants.IS_WINDOWS_PLATFORM:
@@ -418,6 +496,10 @@ fake_responses = {
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
+ '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
+ post_fake_auth,
+ '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
+ get_fake_ping,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
@@ -507,4 +589,28 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'DELETE'):
fake_remove_volume,
+ ('{1}/{0}/nodes/{2}/update?version=1'.format(
+ CURRENT_VERSION, prefix, FAKE_NODE_ID
+ ), 'POST'):
+ post_fake_update_node,
+ ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
+ get_fake_network_list,
+ ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'GET'):
+ get_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'DELETE'):
+ delete_fake_network,
+ ('{1}/{0}/networks/{2}/connect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_connect,
+ ('{1}/{0}/networks/{2}/disconnect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_disconnect,
}
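For orientation, the fake_responses table extended above maps either a bare URL or a (url, method) tuple to a callback returning the (status_code, body) pair to replay. A sketch of one minimal entry in the same style:

    from docker import constants

    prefix = 'http+docker://localunixsocket'
    CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)


    def get_fake_ping():
        return 200, "OK"


    fake_responses = {
        '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix): get_fake_ping,
    }
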
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
new file mode 100644
index 0000000..47890ac
--- /dev/null
+++ b/tests/unit/fake_api_client.py
@@ -0,0 +1,61 @@
+import copy
+import docker
+
+from . import fake_api
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class CopyReturnMagicMock(mock.MagicMock):
+ """
+ A MagicMock which deep copies every return value.
+ """
+ def _mock_call(self, *args, **kwargs):
+ ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
+ if isinstance(ret, (dict, list)):
+ ret = copy.deepcopy(ret)
+ return ret
+
+
+def make_fake_api_client():
+ """
+ Returns a non-complete fake APIClient.
+
+ This returns most of the default cases correctly, but most arguments that
+ change behaviour will not work.
+ """
+ api_client = docker.APIClient()
+ mock_client = CopyReturnMagicMock(**{
+ 'build.return_value': fake_api.FAKE_IMAGE_ID,
+ 'commit.return_value': fake_api.post_fake_commit()[1],
+ 'containers.return_value': fake_api.get_fake_containers()[1],
+ 'create_container.return_value':
+ fake_api.post_fake_create_container()[1],
+ 'create_host_config.side_effect': api_client.create_host_config,
+ 'create_network.return_value': fake_api.post_fake_network()[1],
+ 'exec_create.return_value': fake_api.post_fake_exec_create()[1],
+ 'exec_start.return_value': fake_api.post_fake_exec_start()[1],
+ 'images.return_value': fake_api.get_fake_images()[1],
+ 'inspect_container.return_value':
+ fake_api.get_fake_inspect_container()[1],
+ 'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
+ 'inspect_network.return_value': fake_api.get_fake_network()[1],
+ 'logs.return_value': 'hello world\n',
+ 'networks.return_value': fake_api.get_fake_network_list()[1],
+ 'start.return_value': None,
+ 'wait.return_value': 0,
+ })
+ mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
+ return mock_client
+
+
+def make_fake_client():
+ """
+ Returns a Client with a fake APIClient.
+ """
+ client = docker.DockerClient()
+ client.api = make_fake_api_client()
+ return client
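
A typical consumer of this helper, sketched against the canned values wired up
in make_fake_api_client above (the asserted arguments are illustrative):

    from tests.unit.fake_api_client import make_fake_client

    client = make_fake_client()
    client.containers.create('alpine', 'echo hello world')
    # client.api is a CopyReturnMagicMock, so low-level calls can be asserted:
    client.api.create_container.assert_called_with(
        image='alpine', command='echo hello world',
        host_config={'NetworkMode': 'default'}
    )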
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
new file mode 100644
index 0000000..70c8648
--- /dev/null
+++ b/tests/unit/models_containers_test.py
@@ -0,0 +1,486 @@
+import docker
+from docker.models.containers import Container, _create_container_args
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID, FAKE_EXEC_ID
+from .fake_api_client import make_fake_client
+
+
+class ContainerCollectionTest(unittest.TestCase):
+ def test_run(self):
+ client = make_fake_client()
+ out = client.containers.run("alpine", "echo hello world")
+
+ assert out == 'hello world\n'
+
+ client.api.create_container.assert_called_with(
+ image="alpine",
+ command="echo hello world",
+ detach=False,
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.logs.assert_called_with(
+ FAKE_CONTAINER_ID,
+ stderr=False,
+ stdout=True
+ )
+
+ def test_create_container_args(self):
+ create_kwargs = _create_container_args(dict(
+ image='alpine',
+ command='echo hello world',
+ blkio_weight_device=[{'Path': 'foo', 'Weight': 3}],
+ blkio_weight=2,
+ cap_add=['foo'],
+ cap_drop=['bar'],
+ cgroup_parent='foobar',
+ cpu_period=1,
+ cpu_quota=2,
+ cpu_shares=5,
+ cpuset_cpus='0-3',
+ detach=False,
+ device_read_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_read_iops=[{'Path': 'foo', 'Rate': 3}],
+ device_write_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_write_iops=[{'Path': 'foo', 'Rate': 3}],
+ devices=['/dev/sda:/dev/xvda:rwm'],
+ dns=['8.8.8.8'],
+ domainname='example.com',
+ dns_opt=['foo'],
+ dns_search=['example.com'],
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ extra_hosts={'foo': '1.2.3.4'},
+ group_add=['blah'],
+ ipc_mode='foo',
+ kernel_memory=123,
+ labels={'key': 'value'},
+ links={'foo': 'bar'},
+ log_config={'Type': 'json-file', 'Config': {}},
+ lxc_conf={'foo': 'bar'},
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ mac_address='abc123',
+ mem_limit=123,
+ mem_reservation=123,
+ mem_swappiness=2,
+ memswap_limit=456,
+ name='somename',
+ network_disabled=False,
+ network='foo',
+ oom_kill_disable=True,
+ oom_score_adj=5,
+ pid_mode='host',
+ pids_limit=500,
+ ports={
+ 1111: 4567,
+ 2222: None
+ },
+ privileged=True,
+ publish_all_ports=True,
+ read_only=True,
+ restart_policy={'Name': 'always'},
+ security_opt=['blah'],
+ shm_size=123,
+ stdin_open=True,
+ stop_signal=9,
+ sysctls={'foo': 'bar'},
+ tmpfs={'/blah': ''},
+ tty=True,
+ ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ user='bob',
+ userns_mode='host',
+ version='1.23',
+ volume_driver='some_driver',
+ volumes=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro',
+ ],
+ volumes_from=['container'],
+ working_dir='/code'
+ ))
+
+ expected = dict(
+ image='alpine',
+ command='echo hello world',
+ domainname='example.com',
+ detach=False,
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ host_config={
+ 'Binds': [
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro'
+ ],
+ 'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioWeightDevice': [{'Path': 'foo', 'Weight': 3}],
+ 'BlkioWeight': 2,
+ 'CapAdd': ['foo'],
+ 'CapDrop': ['bar'],
+ 'CgroupParent': 'foobar',
+ 'CpuPeriod': 1,
+ 'CpuQuota': 2,
+ 'CpuShares': 5,
+ 'CpusetCpus': '0-3',
+ 'Devices': [{'PathOnHost': '/dev/sda',
+ 'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/xvda'}],
+ 'Dns': ['8.8.8.8'],
+ 'DnsOptions': ['foo'],
+ 'DnsSearch': ['example.com'],
+ 'ExtraHosts': ['foo:1.2.3.4'],
+ 'GroupAdd': ['blah'],
+ 'IpcMode': 'foo',
+ 'KernelMemory': 123,
+ 'Links': ['foo:bar'],
+ 'LogConfig': {'Type': 'json-file', 'Config': {}},
+ 'LxcConf': [{'Key': 'foo', 'Value': 'bar'}],
+ 'Memory': 123,
+ 'MemoryReservation': 123,
+ 'MemorySwap': 456,
+ 'MemorySwappiness': 2,
+ 'NetworkMode': 'foo',
+ 'OomKillDisable': True,
+ 'OomScoreAdj': 5,
+ 'PidMode': 'host',
+ 'PidsLimit': 500,
+ 'PortBindings': {
+ '1111/tcp': [{'HostIp': '', 'HostPort': '4567'}],
+ '2222/tcp': [{'HostIp': '', 'HostPort': ''}]
+ },
+ 'Privileged': True,
+ 'PublishAllPorts': True,
+ 'ReadonlyRootfs': True,
+ 'RestartPolicy': {'Name': 'always'},
+ 'SecurityOpt': ['blah'],
+ 'ShmSize': 123,
+ 'Sysctls': {'foo': 'bar'},
+ 'Tmpfs': {'/blah': ''},
+ 'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ 'UsernsMode': 'host',
+ 'VolumesFrom': ['container'],
+ },
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ labels={'key': 'value'},
+ mac_address='abc123',
+ name='somename',
+ network_disabled=False,
+ networking_config={'foo': None},
+ ports=[('1111', 'tcp'), ('2222', 'tcp')],
+ stdin_open=True,
+ stop_signal=9,
+ tty=True,
+ user='bob',
+ volume_driver='some_driver',
+ volumes=[
+ '/mnt/vol2',
+ '/mnt/vol1',
+ '/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath'
+ ],
+ working_dir='/code'
+ )
+
+ assert create_kwargs == expected
+
+ def test_run_detach(self):
+ client = make_fake_client()
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='sleep 300',
+ detach=True,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_run_pull(self):
+ client = make_fake_client()
+
+ # raise exception on first call, then return normal value
+ client.api.create_container.side_effect = [
+ docker.errors.ImageNotFound(""),
+ client.api.create_container.return_value
+ ]
+
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.pull.assert_called_with('alpine', tag=None)
+
+ def test_run_with_error(self):
+ client = make_fake_client()
+ client.api.logs.return_value = "some error"
+ client.api.wait.return_value = 1
+
+ with self.assertRaises(docker.errors.ContainerError) as cm:
+ client.containers.run('alpine', 'echo hello world')
+ assert cm.exception.exit_status == 1
+ assert "some error" in str(cm.exception)
+
+ def test_run_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.run(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ detach=False,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+
+ def test_run_remove(self):
+ client = make_fake_client()
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.api.wait.return_value = 1
+ with self.assertRaises(docker.errors.ContainerError):
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ client.api.wait.return_value = 1
+ with self.assertRaises(docker.errors.ContainerError):
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ with self.assertRaises(RuntimeError):
+ client.containers.run("alpine", detach=True, remove=True)
+
+ def test_create(self):
+ client = make_fake_client()
+ container = client.containers.create(
+ 'alpine',
+ 'echo hello world',
+ environment={'FOO': 'BAR'}
+ )
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='echo hello world',
+ environment={'FOO': 'BAR'},
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_create_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.create(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ host_config={'NetworkMode': 'default'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ containers = client.containers.list(all=True)
+ client.api.containers.assert_called_with(
+ all=True,
+ before=None,
+ filters=None,
+ limit=-1,
+ since=None
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ assert len(containers) == 1
+ assert isinstance(containers[0], Container)
+ assert containers[0].id == FAKE_CONTAINER_ID
+
+
+class ContainerTest(unittest.TestCase):
+ def test_name(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.name == 'foobar'
+
+ def test_status(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.status == "running"
+
+ def test_attach(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attach(stream=True)
+ client.api.attach.assert_called_with(FAKE_CONTAINER_ID, stream=True)
+
+ def test_commit(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ image = container.commit()
+ client.api.commit.assert_called_with(FAKE_CONTAINER_ID,
+ repository=None,
+ tag=None)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_diff(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.diff()
+ client.api.diff.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_exec_run(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("echo hello world", privileged=True, stream=True)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user='', environment=None
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+ )
+
+ def test_export(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.export()
+ client.api.export.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_get_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.get_archive('foo')
+ client.api.get_archive.assert_called_with(FAKE_CONTAINER_ID, 'foo')
+
+ def test_image(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.image.id == FAKE_IMAGE_ID
+
+ def test_kill(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.kill(signal=5)
+ client.api.kill.assert_called_with(FAKE_CONTAINER_ID, signal=5)
+
+ def test_labels(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.labels == {'foo': 'bar'}
+
+ def test_logs(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.logs()
+ client.api.logs.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_pause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.pause()
+ client.api.pause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_put_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.put_archive('path', 'foo')
+ client.api.put_archive.assert_called_with(FAKE_CONTAINER_ID,
+ 'path', 'foo')
+
+ def test_remove(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.remove()
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_rename(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.rename("foo")
+ client.api.rename.assert_called_with(FAKE_CONTAINER_ID, "foo")
+
+ def test_resize(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.resize(1, 2)
+ client.api.resize.assert_called_with(FAKE_CONTAINER_ID, 1, 2)
+
+ def test_restart(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.restart()
+ client.api.restart.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_start(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.start()
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stats(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stats()
+ client.api.stats.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stop(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stop()
+ client.api.stop.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_top(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.top()
+ client.api.top.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_unpause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.unpause()
+ client.api.unpause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_update(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.update(cpu_shares=2)
+ client.api.update_container.assert_called_with(FAKE_CONTAINER_ID,
+ cpu_shares=2)
+
+ def test_wait(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.wait()
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
new file mode 100644
index 0000000..9ecb7e4
--- /dev/null
+++ b/tests/unit/models_images_test.py
@@ -0,0 +1,112 @@
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_IMAGE_ID
+from .fake_api_client import make_fake_client
+
+
+class ImageCollectionTest(unittest.TestCase):
+ def test_build(self):
+ client = make_fake_client()
+ image = client.images.build()
+ client.api.build.assert_called_with()
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_get(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_labels(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ assert image.labels == {'bar': 'foo'}
+
+ def test_list(self):
+ client = make_fake_client()
+ images = client.images.list(all=True)
+ client.api.images.assert_called_with(all=True, name=None, filters=None)
+ assert len(images) == 1
+ assert isinstance(images[0], Image)
+ assert images[0].id == FAKE_IMAGE_ID
+
+ def test_load(self):
+ client = make_fake_client()
+ client.images.load('byte stream')
+ client.api.load_image.assert_called_with('byte stream')
+
+ def test_pull(self):
+ client = make_fake_client()
+ image = client.images.pull('test_image')
+ client.api.pull.assert_called_with('test_image', tag=None)
+ client.api.inspect_image.assert_called_with('test_image')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_push(self):
+ client = make_fake_client()
+ client.images.push('foobar', insecure_registry=True)
+ client.api.push.assert_called_with(
+ 'foobar',
+ tag=None,
+ insecure_registry=True
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ client.images.remove('test_image')
+ client.api.remove_image.assert_called_with('test_image')
+
+ def test_search(self):
+ client = make_fake_client()
+ client.images.search('test')
+ client.api.search.assert_called_with('test')
+
+
+class ImageTest(unittest.TestCase):
+ def test_short_id(self):
+ image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'sha256:b684607067'
+
+ image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'b684607067'
+
+ def test_tags(self):
+ image = Image(attrs={
+ 'RepoTags': ['test_image:latest']
+ })
+ assert image.tags == ['test_image:latest']
+
+ image = Image(attrs={
+ 'RepoTags': ['<none>:<none>']
+ })
+ assert image.tags == []
+
+ image = Image(attrs={
+ 'RepoTags': None
+ })
+ assert image.tags == []
+
+ def test_history(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.history()
+ client.api.history.assert_called_with(FAKE_IMAGE_ID)
+
+ def test_save(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.save()
+ client.api.get_image.assert_called_with(FAKE_IMAGE_ID)
+
+ def test_tag(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.tag('foo')
+ client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None)
diff --git a/tests/unit/models_networks_test.py b/tests/unit/models_networks_test.py
new file mode 100644
index 0000000..943b904
--- /dev/null
+++ b/tests/unit/models_networks_test.py
@@ -0,0 +1,64 @@
+import unittest
+
+from .fake_api import FAKE_NETWORK_ID, FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class NetworkCollectionTest(unittest.TestCase):
+
+ def test_create(self):
+ client = make_fake_client()
+ network = client.networks.create("foobar", labels={'foo': 'bar'})
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+ client.api.create_network.assert_called_once_with(
+ "foobar",
+ labels={'foo': 'bar'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ networks = client.networks.list()
+ assert networks[0].id == FAKE_NETWORK_ID
+ client.api.networks.assert_called_once_with()
+
+ client = make_fake_client()
+ client.networks.list(ids=["abc"])
+ client.api.networks.assert_called_once_with(ids=["abc"])
+
+ client = make_fake_client()
+ client.networks.list(names=["foobar"])
+ client.api.networks.assert_called_once_with(names=["foobar"])
+
+
+class NetworkTest(unittest.TestCase):
+
+ def test_connect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.connect(FAKE_CONTAINER_ID)
+ client.api.connect_container_to_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_disconnect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.disconnect(FAKE_CONTAINER_ID)
+ client.api.disconnect_container_from_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.remove()
+ client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID)
diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py
new file mode 100644
index 0000000..5af24ee
--- /dev/null
+++ b/tests/unit/models_resources_test.py
@@ -0,0 +1,28 @@
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class ModelTest(unittest.TestCase):
+ def test_reload(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attrs['Name'] = "oldname"
+ container.reload()
+ assert client.api.inspect_container.call_count == 2
+ assert container.attrs['Name'] == "foobar"
+
+ def test_hash(self):
+ client = make_fake_client()
+ container1 = client.containers.get(FAKE_CONTAINER_ID)
+ my_set = set([container1])
+ assert len(my_set) == 1
+
+ container2 = client.containers.get(FAKE_CONTAINER_ID)
+ my_set.add(container2)
+ assert len(my_set) == 1
+
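+ # A different model type that shares the same ID should still hash
+ # differently: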
+ image1 = client.images.get(FAKE_CONTAINER_ID)
+ my_set.add(image1)
+ assert len(my_set) == 2
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
new file mode 100644
index 0000000..e7e317d
--- /dev/null
+++ b/tests/unit/models_services_test.py
@@ -0,0 +1,53 @@
+import unittest
+from docker.models.services import _get_create_service_kwargs
+
+
+class CreateServiceKwargsTest(unittest.TestCase):
+ def test_get_create_service_kwargs(self):
+ kwargs = _get_create_service_kwargs('test', {
+ 'image': 'foo',
+ 'command': 'true',
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'hostname': 'test_host',
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'networks': ['somenet'],
+ 'endpoint_spec': {'blah': 'blah'},
+ 'container_labels': {'containerkey': 'containervalue'},
+ 'resources': {'foo': 'bar'},
+ 'restart_policy': {'restart': 'policy'},
+ 'log_driver': 'logdriver',
+ 'log_driver_options': {'foo': 'bar'},
+ 'args': ['some', 'args'],
+ 'env': {'FOO': 'bar'},
+ 'workdir': '/',
+ 'user': 'bob',
+ 'mounts': [{'some': 'mounts'}],
+ 'stop_grace_period': 5,
+ 'constraints': ['foo=bar'],
+ })
+
+ task_template = kwargs.pop('task_template')
+
+ assert kwargs == {
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'networks': ['somenet'],
+ 'endpoint_spec': {'blah': 'blah'},
+ }
+ assert set(task_template.keys()) == set([
+ 'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
+ 'LogDriver'
+ ])
+ assert task_template['Placement'] == {'Constraints': ['foo=bar']}
+ assert task_template['LogDriver'] == {
+ 'Name': 'logdriver',
+ 'Options': {'foo': 'bar'}
+ }
+ assert set(task_template['ContainerSpec'].keys()) == set([
+ 'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
+ 'Labels', 'Mounts', 'StopGracePeriod'
+ ])
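
The assertions above amount to a three-way partition of the input kwargs. In
outline (key sets reconstructed from the test, not from the models code):

    # Stay at the top level of the create_service() call:
    CREATE_SERVICE_KWARGS = {
        'name', 'labels', 'mode', 'update_config', 'networks', 'endpoint_spec',
    }
    # Become TaskTemplate sections alongside ContainerSpec:
    TASK_TEMPLATE_SECTIONS = {
        'resources': 'Resources',
        'restart_policy': 'RestartPolicy',
        'constraints': 'Placement',
        'log_driver': 'LogDriver',   # paired with log_driver_options
    }
    # Everything else (image, command, args, hostname, env, workdir, user,
    # container_labels, mounts, stop_grace_period) lands in
    # task_template['ContainerSpec'].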
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
index 2ad1cad..2b7ce52 100644
--- a/tests/unit/ssladapter_test.py
+++ b/tests/unit/ssladapter_test.py
@@ -1,4 +1,5 @@
-from docker.ssladapter import ssladapter
+import unittest
+from docker.transport import ssladapter
try:
from backports.ssl_match_hostname import (
@@ -16,19 +17,18 @@ except ImportError:
OP_NO_SSLv3 = 0x2000000
OP_NO_TLSv1 = 0x4000000
-from .. import base
-
-class SSLAdapterTest(base.BaseTestCase):
+class SSLAdapterTest(unittest.TestCase):
def test_only_uses_tls(self):
ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
assert ssl_context.options & OP_NO_SSLv3
- assert ssl_context.options & OP_NO_SSLv2
+ # if OpenSSL is compiled without SSL2 support, OP_NO_SSLv2 will be 0
+ assert not bool(OP_NO_SSLv2) or ssl_context.options & OP_NO_SSLv2
assert not ssl_context.options & OP_NO_TLSv1
-class MatchHostnameTest(base.BaseTestCase):
+class MatchHostnameTest(unittest.TestCase):
cert = {
'issuer': (
(('countryName', u'US'),),
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
new file mode 100644
index 0000000..374f8b2
--- /dev/null
+++ b/tests/unit/swarm_test.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+import json
+
+from . import fake_api
+from ..helpers import requires_api_version
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
+
+
+class SwarmTest(BaseAPIClientTest):
+ @requires_api_version('1.24')
+ def test_node_update(self):
+ node_spec = {
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+
+ self.client.update_node(
+ node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec
+ )
+ args = fake_request.call_args
+ self.assertEqual(
+ args[0][1], url_prefix + 'nodes/{0}/update?version=1'.format(
+ fake_api.FAKE_NODE_ID)
+ )
+ self.assertEqual(
+ json.loads(args[1]['data']), node_spec
+ )
+ self.assertEqual(
+ args[1]['headers']['Content-Type'], 'application/json'
+ )
diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py
new file mode 100644
index 0000000..f7aefd0
--- /dev/null
+++ b/tests/unit/utils_json_stream_test.py
@@ -0,0 +1,62 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
+
+
+class TestJsonSplitter(object):
+
+ def test_json_splitter_no_object(self):
+ data = '{"foo": "bar'
+ assert json_splitter(data) is None
+
+ def test_json_splitter_with_object(self):
+ data = '{"foo": "bar"}\n \n{"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
+class TestStreamAsText(object):
+
+ def test_stream_with_non_utf_unicode_character(self):
+ stream = [b'\xed\xf3\xf3']
+ output, = stream_as_text(stream)
+ assert output == '���'
+
+ def test_stream_with_utf_character(self):
+ stream = ['ěĝ'.encode('utf-8')]
+ output, = stream_as_text(stream)
+ assert output == 'ěĝ'
+
+
+class TestJsonStream(object):
+
+ def test_with_falsy_entries(self):
+ stream = [
+ '{"one": "two"}\n{}\n',
+ "[1, 2, 3]\n[]\n",
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {},
+ [1, 2, 3],
+ [],
+ ]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
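
The contract these tests pin down: json_splitter buffers until a complete JSON
value is available, then returns it together with the remainder, stripping
leading whitespace on both sides. A minimal re-implementation matching the
tested behaviour (a sketch, not the shipped code):

    import json

    def json_splitter_sketch(buffer):
        # Strip leading whitespace, then try to decode one value off the front.
        buffer = buffer.lstrip()
        try:
            obj, index = json.JSONDecoder().raw_decode(buffer)
            return obj, buffer[index:].lstrip()
        except ValueError:
            # Incomplete object: signal the caller to keep buffering.
            return None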
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 290874f..a2d463d 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -5,30 +5,28 @@ import json
import os
import os.path
import shutil
+import socket
import sys
import tarfile
import tempfile
+import unittest
import pytest
import six
-from docker.client import Client
-from docker.constants import (
- DEFAULT_DOCKER_API_VERSION, IS_WINDOWS_PLATFORM
-)
-from docker.errors import DockerException, InvalidVersion
+from docker.api.client import APIClient
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
- create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
- exclude_paths, convert_volume_binds, decode_json_header, tar,
- split_command, create_ipam_config, create_ipam_pool, parse_devices,
- update_headers
+ parse_bytes, parse_env_file, exclude_paths, convert_volume_binds,
+ decode_json_header, tar, split_command, parse_devices, update_headers,
)
+from docker.utils.build import should_check_directory
from docker.utils.ports import build_port_bindings, split_port
-from docker.utils.utils import create_endpoint_config, format_environment
+from docker.utils.utils import format_environment
-from .. import base
from ..helpers import make_tree
@@ -38,7 +36,7 @@ TEST_CERT_DIR = os.path.join(
)
-class DecoratorsTest(base.BaseTestCase):
+class DecoratorsTest(unittest.TestCase):
def test_update_headers(self):
sample_headers = {
'X-Docker-Locale': 'en-US',
@@ -47,7 +45,7 @@ class DecoratorsTest(base.BaseTestCase):
def f(self, headers=None):
return headers
- client = Client()
+ client = APIClient()
client._auth_configs = {}
g = update_headers(f)
@@ -69,204 +67,7 @@ class DecoratorsTest(base.BaseTestCase):
}
-class HostConfigTest(base.BaseTestCase):
- def test_create_host_config_no_options(self):
- config = create_host_config(version='1.19')
- self.assertFalse('NetworkMode' in config)
-
- def test_create_host_config_no_options_newer_api_version(self):
- config = create_host_config(version='1.20')
- self.assertEqual(config['NetworkMode'], 'default')
-
- def test_create_host_config_invalid_cpu_cfs_types(self):
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota='0')
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period='0')
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota=23.11)
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period=1999.0)
-
- def test_create_host_config_with_cpu_quota(self):
- config = create_host_config(version='1.20', cpu_quota=1999)
- self.assertEqual(config.get('CpuQuota'), 1999)
-
- def test_create_host_config_with_cpu_period(self):
- config = create_host_config(version='1.20', cpu_period=1999)
- self.assertEqual(config.get('CpuPeriod'), 1999)
-
- def test_create_host_config_with_blkio_constraints(self):
- blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
- config = create_host_config(version='1.22',
- blkio_weight=1999,
- blkio_weight_device=blkio_rate,
- device_read_bps=blkio_rate,
- device_write_bps=blkio_rate,
- device_read_iops=blkio_rate,
- device_write_iops=blkio_rate)
-
- self.assertEqual(config.get('BlkioWeight'), 1999)
- self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
- self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
- self.assertEqual(blkio_rate[0]['Rate'], 1000)
-
- def test_create_host_config_with_shm_size(self):
- config = create_host_config(version='1.22', shm_size=67108864)
- self.assertEqual(config.get('ShmSize'), 67108864)
-
- def test_create_host_config_with_shm_size_in_mb(self):
- config = create_host_config(version='1.22', shm_size='64M')
- self.assertEqual(config.get('ShmSize'), 67108864)
-
- def test_create_host_config_with_oom_kill_disable(self):
- config = create_host_config(version='1.20', oom_kill_disable=True)
- self.assertEqual(config.get('OomKillDisable'), True)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.18.3',
- oom_kill_disable=True))
-
- def test_create_host_config_with_userns_mode(self):
- config = create_host_config(version='1.23', userns_mode='host')
- self.assertEqual(config.get('UsernsMode'), 'host')
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.22',
- userns_mode='host'))
- self.assertRaises(
- ValueError, lambda: create_host_config(version='1.23',
- userns_mode='host12'))
-
- def test_create_host_config_with_oom_score_adj(self):
- config = create_host_config(version='1.22', oom_score_adj=100)
- self.assertEqual(config.get('OomScoreAdj'), 100)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.21',
- oom_score_adj=100))
- self.assertRaises(
- TypeError, lambda: create_host_config(version='1.22',
- oom_score_adj='100'))
-
- def test_create_host_config_with_dns_opt(self):
-
- tested_opts = ['use-vc', 'no-tld-query']
- config = create_host_config(version='1.21', dns_opt=tested_opts)
- dns_opts = config.get('DnsOptions')
-
- self.assertTrue('use-vc' in dns_opts)
- self.assertTrue('no-tld-query' in dns_opts)
-
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.20',
- dns_opt=tested_opts))
-
- def test_create_endpoint_config_with_aliases(self):
- config = create_endpoint_config(version='1.22', aliases=['foo', 'bar'])
- assert config == {'Aliases': ['foo', 'bar']}
-
- with pytest.raises(InvalidVersion):
- create_endpoint_config(version='1.21', aliases=['foo', 'bar'])
-
- def test_create_host_config_with_mem_reservation(self):
- config = create_host_config(version='1.21', mem_reservation=67108864)
- self.assertEqual(config.get('MemoryReservation'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', mem_reservation=67108864))
-
- def test_create_host_config_with_kernel_memory(self):
- config = create_host_config(version='1.21', kernel_memory=67108864)
- self.assertEqual(config.get('KernelMemory'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', kernel_memory=67108864))
-
- def test_create_host_config_with_pids_limit(self):
- config = create_host_config(version='1.23', pids_limit=1024)
- self.assertEqual(config.get('PidsLimit'), 1024)
-
- with pytest.raises(InvalidVersion):
- create_host_config(version='1.22', pids_limit=1024)
- with pytest.raises(TypeError):
- create_host_config(version='1.22', pids_limit='1024')
-
-
-class UlimitTest(base.BaseTestCase):
- def test_create_host_config_dict_ulimit(self):
- ulimit_dct = {'name': 'nofile', 'soft': 8096}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_dict_ulimit_capitals(self):
- ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
- self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_obj_ulimit(self):
- ulimit_dct = Ulimit(name='nofile', soft=8096)
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj, ulimit_dct)
-
- def test_ulimit_invalid_type(self):
- self.assertRaises(ValueError, lambda: Ulimit(name=None))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
-
-
-class LogConfigTest(base.BaseTestCase):
- def test_create_host_config_dict_logconfig(self):
- dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=dct
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(dct['type'], config['LogConfig'].type)
-
- def test_create_host_config_obj_logconfig(self):
- obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=obj
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(obj, config['LogConfig'])
-
- def test_logconfig_invalid_config_type(self):
- with pytest.raises(ValueError):
- LogConfig(type=LogConfig.types.JSON, config='helloworld')
-
-
-class KwargsFromEnvTest(base.BaseTestCase):
+class KwargsFromEnvTest(unittest.TestCase):
def setUp(self):
self.os_environ = os.environ.copy()
@@ -294,7 +95,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
self.assertEqual(False, kwargs['tls'].assert_hostname)
self.assertTrue(kwargs['tls'].verify)
try:
- client = Client(**kwargs)
+ client = APIClient(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].ca_cert, client.verify)
self.assertEqual(kwargs['tls'].cert, client.cert)
@@ -313,7 +114,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
self.assertEqual(True, kwargs['tls'].assert_hostname)
self.assertEqual(False, kwargs['tls'].verify)
try:
- client = Client(**kwargs)
+ client = APIClient(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].cert, client.cert)
self.assertFalse(kwargs['tls'].verify)
@@ -366,7 +167,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
assert 'tls' not in kwargs
-class ConverVolumeBindsTest(base.BaseTestCase):
+class ConvertVolumeBindsTest(unittest.TestCase):
def test_convert_volume_binds_empty(self):
self.assertEqual(convert_volume_binds({}), [])
self.assertEqual(convert_volume_binds([]), [])
@@ -425,7 +226,7 @@ class ConverVolumeBindsTest(base.BaseTestCase):
)
-class ParseEnvFileTest(base.BaseTestCase):
+class ParseEnvFileTest(unittest.TestCase):
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
@@ -456,10 +257,18 @@ class ParseEnvFileTest(base.BaseTestCase):
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
- get_parse_env_file = parse_env_file((env_file))
+ get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
os.unlink(env_file)
+ def test_parse_env_file_newline(self):
+ env_file = self.generate_tempfile(
+ file_content='\nUSER=jdoe\n\n\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ self.assertEqual(get_parse_env_file,
+ {'USER': 'jdoe', 'PASS': 'secret'})
+ os.unlink(env_file)
+
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
@@ -468,7 +277,7 @@ class ParseEnvFileTest(base.BaseTestCase):
os.unlink(env_file)
-class ParseHostTest(base.BaseTestCase):
+class ParseHostTest(unittest.TestCase):
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
@@ -530,7 +339,7 @@ class ParseHostTest(base.BaseTestCase):
assert parse_host(host_value) == expected_result
-class ParseRepositoryTagTest(base.BaseTestCase):
+class ParseRepositoryTagTest(unittest.TestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
def test_index_image_no_tag(self):
@@ -576,7 +385,7 @@ class ParseRepositoryTagTest(base.BaseTestCase):
)
-class ParseDeviceTest(base.BaseTestCase):
+class ParseDeviceTest(unittest.TestCase):
def test_dict(self):
devices = parse_devices([{
'PathOnHost': '/dev/sda1',
@@ -635,7 +444,7 @@ class ParseDeviceTest(base.BaseTestCase):
})
-class ParseBytesTest(base.BaseTestCase):
+class ParseBytesTest(unittest.TestCase):
def test_parse_bytes_valid(self):
self.assertEqual(parse_bytes("512MB"), 536870912)
self.assertEqual(parse_bytes("512M"), 536870912)
@@ -655,7 +464,7 @@ class ParseBytesTest(base.BaseTestCase):
)
-class UtilsTest(base.BaseTestCase):
+class UtilsTest(unittest.TestCase):
longMessage = True
def test_convert_filters(self):
@@ -679,23 +488,8 @@ class UtilsTest(base.BaseTestCase):
decoded_data = decode_json_header(data)
self.assertEqual(obj, decoded_data)
- def test_create_ipam_config(self):
- ipam_pool = create_ipam_pool(subnet='192.168.52.0/24',
- gateway='192.168.52.254')
-
- ipam_config = create_ipam_config(pool_configs=[ipam_pool])
- self.assertEqual(ipam_config, {
- 'Driver': 'default',
- 'Config': [{
- 'Subnet': '192.168.52.0/24',
- 'Gateway': '192.168.52.254',
- 'AuxiliaryAddresses': None,
- 'IPRange': None,
- }]
- })
-
-class SplitCommandTest(base.BaseTestCase):
+class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
@@ -704,7 +498,7 @@ class SplitCommandTest(base.BaseTestCase):
self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
-class PortsTest(base.BaseTestCase):
+class PortsTest(unittest.TestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, ["2000"])
@@ -736,6 +530,11 @@ class PortsTest(base.BaseTestCase):
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, ["1000", "1001"])
+ def test_split_port_random_port_range_with_host_port(self):
+ internal_port, external_port = split_port("1000-1001:2000")
+ self.assertEqual(internal_port, ["2000"])
+ self.assertEqual(external_port, ["1000-1001"])
+
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
self.assertEqual(internal_port, ["2000"])
@@ -753,6 +552,12 @@ class PortsTest(base.BaseTestCase):
self.assertEqual(external_port,
[("127.0.0.1", "1000"), ("127.0.0.1", "1001")])
+ def test_split_port_with_ipv6_address(self):
+ internal_port, external_port = split_port(
+ "2001:abcd:ef00::2:1000:2000")
+ self.assertEqual(internal_port, ["2000"])
+ self.assertEqual(external_port, [("2001:abcd:ef00::2", "1000")])
+
def test_split_port_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000:tcp"))
@@ -775,6 +580,16 @@ class PortsTest(base.BaseTestCase):
self.assertRaises(ValueError,
lambda: split_port("localhost:"))
+ def test_with_no_container_port(self):
+ self.assertRaises(ValueError,
+ lambda: split_port("localhost:80:"))
+
+ def test_split_port_empty_string(self):
+ self.assertRaises(ValueError, lambda: split_port(""))
+
+ def test_split_port_non_string(self):
+ assert split_port(1243) == (['1243'], None)
+
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
@@ -812,12 +627,14 @@ class PortsTest(base.BaseTestCase):
def convert_paths(collection):
- if not IS_WINDOWS_PLATFORM:
- return collection
- return set(map(lambda x: x.replace('/', '\\'), collection))
+ return set(map(convert_path, collection))
+
+
+def convert_path(path):
+ return path.replace('/', os.path.sep)
-class ExcludePathsTest(base.BaseTestCase):
+class ExcludePathsTest(unittest.TestCase):
dirs = [
'foo',
'foo/bar',
@@ -879,12 +696,12 @@ class ExcludePathsTest(base.BaseTestCase):
set(['Dockerfile.alt', '.dockerignore'])
assert self.exclude(['*'], dockerfile='foo/Dockerfile3') == \
- set(['foo/Dockerfile3', '.dockerignore'])
+ convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
- assert 'foo/Dockerfile3' in includes
- assert 'foo/a.py' not in includes
+ assert convert_path('foo/Dockerfile3') in includes
+ assert convert_path('foo/a.py') not in includes
def test_single_filename(self):
assert self.exclude(['a.py']) == convert_paths(
@@ -986,6 +803,16 @@ class ExcludePathsTest(base.BaseTestCase):
])
)
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_directory_with_subdir_exception_win32_pathsep(self):
+ assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
self.all_paths - set([
@@ -998,8 +825,27 @@ class ExcludePathsTest(base.BaseTestCase):
self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
)
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_subdirectory_win32_pathsep(self):
+ assert self.exclude(['foo\\bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_double_wildcard(self):
+ assert self.exclude(['**/a.py']) == convert_paths(
+ self.all_paths - set(
+ ['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
+ )
+ )
+
+ assert self.exclude(['foo/**/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
-class TarTest(base.Cleanup, base.BaseTestCase):
+class TarTest(unittest.TestCase):
def test_tar_with_excludes(self):
dirs = [
'foo',
@@ -1082,8 +928,86 @@ class TarTest(base.Cleanup, base.BaseTestCase):
sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
)
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32')
+ def test_tar_socket_file(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ sock = socket.socket(socket.AF_UNIX)
+ self.addCleanup(sock.close)
+ sock.bind(os.path.join(base, 'test.sock'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ self.assertEqual(
+ sorted(tar_data.getnames()), ['bar', 'foo']
+ )
+
+
+class ShouldCheckDirectoryTest(unittest.TestCase):
+ exclude_patterns = [
+ 'exclude_rather_large_directory',
+ 'dir/with/subdir_excluded',
+ 'dir/with/exceptions'
+ ]
+
+ include_patterns = [
+ 'dir/with/exceptions/like_this_one',
+ 'dir/with/exceptions/in/descendents'
+ ]
+
+ def test_should_check_directory_not_excluded(self):
+ assert should_check_directory(
+ 'not_excluded', self.exclude_patterns, self.include_patterns
+ )
+ assert should_check_directory(
+ convert_path('dir/with'), self.exclude_patterns,
+ self.include_patterns
+ )
+
+ def test_should_check_parent_directories_of_excluded(self):
+ assert should_check_directory(
+ 'dir', self.exclude_patterns, self.include_patterns
+ )
+ assert should_check_directory(
+ convert_path('dir/with'), self.exclude_patterns,
+ self.include_patterns
+ )
+
+ def test_should_not_check_excluded_directories_with_no_exceptions(self):
+ assert not should_check_directory(
+ 'exclude_rather_large_directory', self.exclude_patterns,
+ self.include_patterns
+ )
+ assert not should_check_directory(
+ convert_path('dir/with/subdir_excluded'), self.exclude_patterns,
+ self.include_patterns
+ )
+
+ def test_should_check_excluded_directory_with_exceptions(self):
+ assert should_check_directory(
+ convert_path('dir/with/exceptions'), self.exclude_patterns,
+ self.include_patterns
+ )
+ assert should_check_directory(
+ convert_path('dir/with/exceptions/in'), self.exclude_patterns,
+ self.include_patterns
+ )
+
+ def test_should_not_check_siblings_of_exceptions(self):
+ assert not should_check_directory(
+ convert_path('dir/with/exceptions/but_not_here'),
+ self.exclude_patterns, self.include_patterns
+ )
+
+ def test_should_check_subdirectories_of_exceptions(self):
+ assert should_check_directory(
+ convert_path('dir/with/exceptions/like_this_one/subdir'),
+ self.exclude_patterns, self.include_patterns
+ )
+
-class FormatEnvironmentTest(base.BaseTestCase):
+class FormatEnvironmentTest(unittest.TestCase):
def test_format_env_binary_unicode_value(self):
env_dict = {
'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'