Diffstat (limited to 'docker')
-rw-r--r--  docker/__init__.py                   |   3
-rw-r--r--  docker/api/build.py                  |  69
-rw-r--r--  docker/api/client.py                 | 167
-rw-r--r--  docker/api/config.py                 |  17
-rw-r--r--  docker/api/container.py              | 192
-rw-r--r--  docker/api/daemon.py                 |  30
-rw-r--r--  docker/api/exec_api.py               |  20
-rw-r--r--  docker/api/image.py                  | 105
-rw-r--r--  docker/api/network.py                |  16
-rw-r--r--  docker/api/plugin.py                 |   6
-rw-r--r--  docker/api/secret.py                 |   9
-rw-r--r--  docker/api/service.py                |  66
-rw-r--r--  docker/api/swarm.py                  | 107
-rw-r--r--  docker/api/volume.py                 |   8
-rw-r--r--  docker/auth.py                       | 416
-rw-r--r--  docker/client.py                     |  28
-rw-r--r--  docker/constants.py                  |  28
-rw-r--r--  docker/context/__init__.py           |   3
-rw-r--r--  docker/context/api.py                | 203
-rw-r--r--  docker/context/config.py             |  81
-rw-r--r--  docker/context/context.py            | 243
-rw-r--r--  docker/credentials/__init__.py       |   4
-rw-r--r--  docker/credentials/constants.py      |   4
-rw-r--r--  docker/credentials/errors.py         |  25
-rw-r--r--  docker/credentials/store.py          |  94
-rw-r--r--  docker/credentials/utils.py          |  38
-rw-r--r--  docker/errors.py                     |  61
-rw-r--r--  docker/models/configs.py             |   2
-rw-r--r--  docker/models/containers.py          | 122
-rw-r--r--  docker/models/images.py              | 101
-rw-r--r--  docker/models/networks.py            |   5
-rw-r--r--  docker/models/plugins.py             |   7
-rw-r--r--  docker/models/resource.py            |   9
-rw-r--r--  docker/models/secrets.py             |   3
-rw-r--r--  docker/models/services.py            | 104
-rw-r--r--  docker/models/swarm.py               |  34
-rw-r--r--  docker/tls.py                        |  16
-rw-r--r--  docker/transport/__init__.py         |  11
-rw-r--r--  docker/transport/basehttpadapter.py  |   8
-rw-r--r--  docker/transport/npipeconn.py        |  33
-rw-r--r--  docker/transport/npipesocket.py      |  24
-rw-r--r--  docker/transport/sshconn.py          | 255
-rw-r--r--  docker/transport/ssladapter.py       |   8
-rw-r--r--  docker/transport/unixconn.py         |  48
-rw-r--r--  docker/types/__init__.py             |   9
-rw-r--r--  docker/types/base.py                 |   5
-rw-r--r--  docker/types/containers.py           | 245
-rw-r--r--  docker/types/daemon.py               |  18
-rw-r--r--  docker/types/healthcheck.py          |  14
-rw-r--r--  docker/types/networks.py             |  11
-rw-r--r--  docker/types/services.py             | 150
-rw-r--r--  docker/utils/build.py                |  30
-rw-r--r--  docker/utils/config.py               |   6
-rw-r--r--  docker/utils/decorators.py           |   2
-rw-r--r--  docker/utils/fnmatch.py              |   2
-rw-r--r--  docker/utils/json_stream.py          |  13
-rw-r--r--  docker/utils/ports.py                |  10
-rw-r--r--  docker/utils/proxy.py                |  73
-rw-r--r--  docker/utils/socket.py               | 107
-rw-r--r--  docker/utils/utils.py                | 211
-rw-r--r--  docker/version.py                    |   4
61 files changed, 2830 insertions, 913 deletions
diff --git a/docker/__init__.py b/docker/__init__.py
index cf732e1..e5c1a8f 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,6 +1,9 @@
# flake8: noqa
from .api import APIClient
from .client import DockerClient, from_env
+from .context import Context
+from .context import ContextAPI
+from .tls import TLSConfig
from .version import version, version_info
__version__ = version
diff --git a/docker/api/build.py b/docker/api/build.py
index 419255f..aac43c4 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -12,14 +12,15 @@ from .. import utils
log = logging.getLogger(__name__)
-class BuildApiMixin(object):
+class BuildApiMixin:
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
- squash=None, extra_hosts=None, platform=None, isolation=None):
+ squash=None, extra_hosts=None, platform=None, isolation=None,
+ use_config_proxy=True):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
@@ -103,6 +104,10 @@ class BuildApiMixin(object):
platform (str): Platform in the format ``os[/arch[/variant]]``
isolation (str): Isolation technology used during build.
Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
Returns:
A generator for the build output.
@@ -116,6 +121,7 @@ class BuildApiMixin(object):
remote = context = None
headers = {}
container_limits = container_limits or {}
+ buildargs = buildargs or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
if gzip and encoding is not None:
@@ -126,7 +132,7 @@ class BuildApiMixin(object):
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
- 'Invalid container_limits key {0}'.format(key)
+ f'Invalid container_limits key {key}'
)
if custom_context:
@@ -144,7 +150,7 @@ class BuildApiMixin(object):
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
- with open(dockerignore, 'r') as f:
+ with open(dockerignore) as f:
exclude = list(filter(
lambda x: x != '' and x[0] != '#',
[l.strip() for l in f.read().splitlines()]
@@ -168,6 +174,10 @@ class BuildApiMixin(object):
}
params.update(container_limits)
+ if use_config_proxy:
+ proxy_args = self._proxy_configs.get_environment()
+ for k, v in proxy_args.items():
+ buildargs.setdefault(k, v)
if buildargs:
params.update({'buildargs': json.dumps(buildargs)})
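A minimal sketch of how the new ``use_config_proxy`` default behaves from the build caller's side, assuming a local Unix socket and a ``proxies`` section in ``~/.docker/config.json`` (the tag and proxy URL are illustrative):

    import docker

    client = docker.APIClient(base_url='unix:///var/run/docker.sock')

    # With use_config_proxy=True (the default), proxy settings from the
    # client config file are merged into buildargs via setdefault(), so
    # values passed explicitly by the caller still take precedence.
    for chunk in client.build(
            path='.', tag='example/app:latest', decode=True,
            buildargs={'HTTP_PROXY': 'http://proxy.internal:3128'}):
        print(chunk)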
@@ -286,40 +296,32 @@ class BuildApiMixin(object):
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
- if not self._auth_configs:
+ if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
- self._auth_configs = auth.load_config()
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
- auth_data = {}
- if self._auth_configs.get('credsStore'):
- # Using a credentials store, we need to retrieve the
- # credentials for each registry listed in the config.json file
- # Matches CLI behavior: https://github.com/docker/docker/blob/
- # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
- # credentials/native_store.go#L68-L83
- for registry in self._auth_configs.get('auths', {}).keys():
- auth_data[registry] = auth.resolve_authconfig(
- self._auth_configs, registry,
- credstore_env=self.credstore_env,
- )
- else:
- auth_data = self._auth_configs.get('auths', {}).copy()
- # See https://github.com/docker/docker-py/issues/1683
- if auth.INDEX_NAME in auth_data:
- auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
+ auth_data = self._auth_configs.get_all_credentials()
+
+ # See https://github.com/docker/docker-py/issues/1683
+ if (auth.INDEX_URL not in auth_data and
+ auth.INDEX_NAME in auth_data):
+ auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
- 'Sending auth config ({0})'.format(
+ 'Sending auth config ({})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
- headers['X-Registry-Config'] = auth.encode_header(
- auth_data
- )
+ if auth_data:
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
else:
log.debug('No auth config found')
@@ -331,13 +333,20 @@ def process_dockerfile(dockerfile, path):
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
-
+ if constants.IS_WINDOWS_PLATFORM and path.startswith(
+ constants.WINDOWS_LONGPATH_PREFIX):
+ abs_dockerfile = '{}{}'.format(
+ constants.WINDOWS_LONGPATH_PREFIX,
+ os.path.normpath(
+ abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):]
+ )
+ )
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
- with open(abs_dockerfile, 'r') as df:
+ with open(abs_dockerfile) as df:
return (
- '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ f'.dockerfile.{random.getrandbits(160):x}',
df.read()
)
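The Windows branch above re-attaches the long-path prefix because ``os.path.normpath`` would otherwise mangle it. A standalone sketch of the same idea, assuming ``WINDOWS_LONGPATH_PREFIX`` is the ``\\?\`` marker defined in ``docker/constants.py``:

    import os

    WINDOWS_LONGPATH_PREFIX = '\\\\?\\'  # assumed to mirror docker.constants

    def normalize_long_path(path):
        # Strip the \\?\ prefix, normalize the remainder, then re-attach
        # the prefix so normpath() never sees it.
        if path.startswith(WINDOWS_LONGPATH_PREFIX):
            return WINDOWS_LONGPATH_PREFIX + os.path.normpath(
                path[len(WINDOWS_LONGPATH_PREFIX):]
            )
        return os.path.normpath(path)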
diff --git a/docker/api/client.py b/docker/api/client.py
index 91da1c8..2667922 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -1,12 +1,25 @@
import json
import struct
+import urllib
from functools import partial
import requests
import requests.exceptions
-import six
import websocket
+from .. import auth
+from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
+ DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES)
+from ..errors import (DockerException, InvalidVersion, TLSParameterError,
+ create_api_error_from_http_exception)
+from ..tls import TLSConfig
+from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
+from ..utils import check_resource, config, update_headers, utils
+from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
@@ -19,23 +32,14 @@ from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
-from .. import auth
-from ..constants import (
- DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
- DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
- MINIMUM_DOCKER_API_VERSION
-)
-from ..errors import (
- DockerException, InvalidVersion, TLSParameterError,
- create_api_error_from_http_exception
-)
-from ..tls import TLSConfig
-from ..transport import SSLAdapter, UnixAdapter
-from ..utils import utils, check_resource, update_headers, config
-from ..utils.socket import frames_iter, socket_raw_iter
-from ..utils.json_stream import json_stream
+
+try:
+ from ..transport import NpipeHTTPAdapter
+except ImportError:
+ pass
+
try:
- from ..transport import NpipeAdapter
+ from ..transport import SSHHTTPAdapter
except ImportError:
pass
@@ -76,7 +80,7 @@ class APIClient(
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -85,6 +89,11 @@ class APIClient(
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
+        use_ssh_client (bool): If set to ``True``, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
@@ -95,9 +104,10 @@ class APIClient(
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS,
- credstore_env=None):
- super(APIClient, self).__init__()
+ user_agent=DEFAULT_USER_AGENT, num_pools=None,
+ credstore_env=None, use_ssh_client=False,
+ max_pool_size=DEFAULT_MAX_POOL_SIZE):
+ super().__init__()
if tls and not base_url:
raise TLSParameterError(
@@ -109,17 +119,31 @@ class APIClient(
self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config()
+
+ proxy_config = self._general_configs.get('proxies', {})
+ try:
+ proxies = proxy_config[base_url]
+ except KeyError:
+ proxies = proxy_config.get('default', {})
+
+ self._proxy_configs = ProxyConfig.from_dict(proxies)
+
self._auth_configs = auth.load_config(
- config_dict=self._general_configs
+ config_dict=self._general_configs, credstore_env=credstore_env,
)
self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
+        # SSH has a different default for num_pools from all other adapters
+ num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
+ base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
+
if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(
- base_url, timeout, pool_connections=num_pools
+ self._custom_adapter = UnixHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
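The ``proxies`` lookup above tries an exact ``base_url`` match first, then falls back to the ``default`` entry. A sketch of the config shape it expects, assuming ``ProxyConfig`` keeps the interface introduced by this diff's ``docker/utils/proxy.py`` (values are illustrative):

    # ~/.docker/config.json (excerpt):
    # {
    #     "proxies": {
    #         "default": {
    #             "httpProxy": "http://proxy.internal:3128",
    #             "httpsProxy": "http://proxy.internal:3128",
    #             "noProxy": "localhost,127.0.0.1"
    #         }
    #     }
    # }
    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig.from_dict({
        'httpProxy': 'http://proxy.internal:3128',
        'noProxy': 'localhost,127.0.0.1',
    })
    # get_environment() emits both lower- and upper-case variants,
    # e.g. http_proxy/HTTP_PROXY and no_proxy/NO_PROXY.
    print(cfg.get_environment())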
@@ -132,8 +156,9 @@ class APIClient(
'The npipe:// protocol is only supported on Windows'
)
try:
- self._custom_adapter = NpipeAdapter(
- base_url, timeout, pool_connections=num_pools
+ self._custom_adapter = NpipeHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
)
except NameError:
raise DockerException(
@@ -141,26 +166,40 @@ class APIClient(
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
+ elif base_url.startswith('ssh://'):
+ try:
+ self._custom_adapter = SSHHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size, shell_out=use_ssh_client
+ )
+ except NameError:
+ raise DockerException(
+ 'Install paramiko package to enable ssh:// support'
+ )
+ self.mount('http+docker://ssh', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://ssh'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
- self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self._custom_adapter = SSLHTTPAdapter(
+ pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
- if version is None:
- self._version = DEFAULT_DOCKER_API_VERSION
- elif isinstance(version, six.string_types):
- if version.lower() == 'auto':
- self._version = self._retrieve_server_version()
- else:
- self._version = version
+ if version is None or (isinstance(
+ version,
+ str
+ ) and version.lower() == 'auto'):
+ self._version = self._retrieve_server_version()
else:
+ self._version = version
+ if not isinstance(self._version, str):
raise DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
+ 'Version parameter must be a string or None. Found {}'.format(
type(version).__name__
)
)
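With the new ``ssh://`` branch, a client can reach a remote daemon in two ways; a short sketch (the user and host are placeholders, and paramiko is only required when ``use_ssh_client`` stays ``False``):

    import docker

    # Connect through paramiko:
    client = docker.APIClient(base_url='ssh://user@docker-host')

    # Or shell out to the local ssh binary instead:
    client = docker.APIClient(
        base_url='ssh://user@docker-host', use_ssh_client=True
    )
    print(client.version())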
@@ -180,7 +219,7 @@ class APIClient(
)
except Exception as e:
raise DockerException(
- 'Error while fetching server API version: {0}'.format(e)
+ f'Error while fetching server API version: {e}'
)
def _set_request_timeout(self, kwargs):
@@ -207,21 +246,21 @@ class APIClient(
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
- if not isinstance(arg, six.string_types):
+ if not isinstance(arg, str):
raise ValueError(
- 'Expected a string but found {0} ({1}) '
+ 'Expected a string but found {} ({}) '
'instead'.format(arg, type(arg))
)
- quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+ quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
+ return '{}/v{}{}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+ return f'{self.base_url}{pathfmt.format(*args)}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
@@ -245,7 +284,7 @@ class APIClient(
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
- for k, v in six.iteritems(data):
+ for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
@@ -279,12 +318,12 @@ class APIClient(
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
- elif six.PY3:
+ elif self.base_url.startswith('http+docker://ssh'):
+ sock = response.raw._fp.fp.channel
+ else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
@@ -302,8 +341,7 @@ class APIClient(
if response.raw._fp.chunked:
if decode:
- for chunk in json_stream(self._stream_helper(response, False)):
- yield chunk
+ yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
@@ -359,22 +397,31 @@ class APIClient(
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
- for out in response.iter_content(chunk_size, decode):
- yield out
- def _read_from_socket(self, response, stream, tty=False):
+        # Disable timeout on the underlying socket to prevent
+        # "Read timed out" errors for long-running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ yield from response.iter_content(chunk_size, decode)
+
+ def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
- gen = None
- if tty is False:
- gen = frames_iter(socket)
+ gen = frames_iter(socket, tty)
+
+ if demux:
+ # The generator will output tuples (stdout, stderr)
+ gen = (demux_adaptor(*frame) for frame in gen)
else:
- gen = socket_raw_iter(socket)
+ # The generator will output strings
+ gen = (data for (_, data) in gen)
if stream:
return gen
else:
- return six.binary_type().join(gen)
+ # Wait for all the frames, concatenate them, and return the result
+ return consume_socket_output(gen, demux=demux)
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
@@ -420,7 +467,7 @@ class APIClient(
self._result(res, binary=True)
self._raise_for_status(res)
- sep = six.binary_type()
+ sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
@@ -434,7 +481,7 @@ class APIClient(
def get_adapter(self, url):
try:
- return super(APIClient, self).get_adapter(url)
+ return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
@@ -452,9 +499,11 @@ class APIClient(
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
- otherwise``$HOME/.dockercfg``)
+ otherwise ``$HOME/.dockercfg``)
Returns:
None
"""
- self._auth_configs = auth.load_config(dockercfg_path)
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
diff --git a/docker/api/config.py b/docker/api/config.py
index 767bef2..88c367e 100644
--- a/docker/api/config.py
+++ b/docker/api/config.py
@@ -1,13 +1,11 @@
import base64
-import six
-
from .. import utils
-class ConfigApiMixin(object):
+class ConfigApiMixin:
@utils.minimum_version('1.30')
- def create_config(self, name, data, labels=None):
+ def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
@@ -15,6 +13,9 @@ class ConfigApiMixin(object):
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
+            templating (dict): dictionary containing the name of the
+                templating driver to be used, expressed as
+                ``{ 'name': '<templating_driver_name>' }``
Returns (dict): ID of the newly created config
"""
@@ -22,12 +23,12 @@ class ConfigApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
- 'Labels': labels
+ 'Labels': labels,
+ 'Templating': templating
}
url = self._url('/configs/create')
@@ -42,7 +43,7 @@ class ConfigApiMixin(object):
Retrieve config metadata
Args:
- id (string): Full ID of the config to remove
+ id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
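A short sketch of the new ``templating`` parameter; ``golang`` is the driver name Swarm uses for config templating, and the config name and payload are illustrative:

    import docker

    client = docker.APIClient()
    client.create_config(
        name='nginx-conf',
        data=b'server_name {{ .Service.Name }};',
        templating={'name': 'golang'},
    )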
diff --git a/docker/api/container.py b/docker/api/container.py
index d4f75f5..83fcd4f 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,19 +1,19 @@
-import six
from datetime import datetime
from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
-from ..types import (
- CancellableStream, ContainerConfig, EndpointConfig, HostConfig,
- NetworkingConfig
-)
+from ..types import CancellableStream
+from ..types import ContainerConfig
+from ..types import EndpointConfig
+from ..types import HostConfig
+from ..types import NetworkingConfig
-class ContainerApiMixin(object):
+class ContainerApiMixin:
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
- stream=False, logs=False):
+ stream=False, logs=False, demux=False):
"""
Attach to a container.
@@ -28,11 +28,15 @@ class ContainerApiMixin(object):
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
+ demux (bool): Keep stdout and stderr separate.
Returns:
- By default, the container's output as a single string.
+ By default, the container's output as a single string (two if
+ ``demux=True``: one for stdout and one for stderr).
- If ``stream=True``, an iterator of output strings.
+ If ``stream=True``, an iterator of output strings. If
+ ``demux=True``, two iterators are returned: one for stdout and one
+ for stderr.
Raises:
:py:class:`docker.errors.APIError`
@@ -54,8 +58,7 @@ class ContainerApiMixin(object):
response = self._post(u, headers=headers, params=params, stream=True)
output = self._read_from_socket(
- response, stream, self._check_is_tty(container)
- )
+ response, stream, self._check_is_tty(container), demux=demux)
if stream:
return CancellableStream(output, response)
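A sketch of what ``demux`` changes for ``attach`` callers (the container ID is a placeholder; demultiplexing works without a TTY because the daemon multiplexes the two streams into frames):

    import docker

    client = docker.APIClient()
    container_id = 'my-container'  # placeholder

    # Combined output, as before:
    output = client.attach(container_id, logs=True)

    # Demultiplexed: a (stdout, stderr) tuple of byte strings...
    stdout, stderr = client.attach(container_id, logs=True, demux=True)

    # ...or, with stream=True, an iterator of such tuples.
    for out, err in client.attach(container_id, stream=True, demux=True):
        pass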
@@ -169,7 +172,8 @@ class ContainerApiMixin(object):
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- - `label` (str): format either ``"key"`` or ``"key=value"``
+                - ``label`` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
@@ -218,7 +222,8 @@ class ContainerApiMixin(object):
working_dir=None, domainname=None, host_config=None,
mac_address=None, labels=None, stop_signal=None,
networking_config=None, healthcheck=None,
- stop_timeout=None, runtime=None):
+ stop_timeout=None, runtime=None,
+ use_config_proxy=True):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
@@ -237,9 +242,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[1111, 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
1111: 4567,
2222: None
})
@@ -251,22 +256,22 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
Or without host port assignment:
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config:
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None
})
)
@@ -276,7 +281,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [1234, 4567]
})
@@ -284,7 +289,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [
('192.168.0.100', 1234),
('192.168.0.101', 1234)
@@ -300,9 +305,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds={
+ host_config=client.api.create_host_config(binds={
'/home/user1/': {
'bind': '/mnt/vol2',
'mode': 'rw',
@@ -319,9 +324,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds=[
+ host_config=client.api.create_host_config(binds=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
])
@@ -339,15 +344,15 @@ class ContainerApiMixin(object):
.. code-block:: python
- networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config(
+ networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config(
ipv4_address='172.28.0.124',
aliases=['foo', 'bar'],
links=['container2']
)
})
- ctnr = docker_client.create_container(
+ ctnr = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -387,6 +392,10 @@ class ContainerApiMixin(object):
runtime (str): Runtime to use with this container.
healthcheck (dict): Specify a test to perform to check that the
container is healthy.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being created.
Returns:
A dictionary with an image 'Id' key and a 'Warnings' key.
@@ -397,9 +406,17 @@ class ContainerApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
+ if isinstance(environment, dict):
+ environment = utils.utils.format_environment(environment)
+
+ if use_config_proxy:
+ environment = self._proxy_configs.inject_proxy_environment(
+ environment
+ ) or None
+
config = self.create_container_config(
image, command, hostname, user, detach, stdin_open, tty,
ports, environment, volumes,
@@ -461,34 +478,33 @@ class ContainerApiMixin(object):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
dns_search (:py:class:`list`): DNS search domains.
- extra_hosts (dict): Addtional hostnames to resolve inside the
+ extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
init (bool): Run an init inside the container that forwards
signals and reaps processes
- init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
- isolation (str): Isolation technology to use. Default: `None`.
- links (dict or list of tuples): Either a dictionary mapping name
- to alias or as a list of ``(name, alias)`` tuples.
- log_config (dict): Logging configuration, as a dictionary with
- keys:
-
- - ``type`` The logging driver name.
- - ``config`` A dictionary of configuration for the logging
- driver.
-
+ isolation (str): Isolation technology to use. Default: ``None``.
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration
lxc_conf (dict): LXC config.
mem_limit (float or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
                specified without a units character, bytes are assumed as an
                intended unit.
+ mem_reservation (float or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -500,11 +516,13 @@ class ContainerApiMixin(object):
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
- on the bridge network.
+ the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+ This mode is incompatible with ``port_bindings``.
+
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
@@ -513,7 +531,8 @@ class ContainerApiMixin(object):
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
port_bindings (dict): See :py:meth:`create_container`
- for more information.
+ for more information.
+                Incompatible with ``host`` in ``network_mode``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@@ -543,10 +562,12 @@ class ContainerApiMixin(object):
}
ulimits (:py:class:`list`): Ulimits to set inside the container,
- as a list of dicts.
+ as a list of :py:class:`docker.types.Ulimit` instances.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
runtime (str): Runtime to use with this container.
@@ -558,7 +579,7 @@ class ContainerApiMixin(object):
Example:
- >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ >>> client.api.create_host_config(privileged=True, cap_drop=['MKNOD'],
volumes_from=['nostalgic_newton'])
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
@@ -589,11 +610,11 @@ class ContainerApiMixin(object):
Example:
- >>> docker_client.create_network('network1')
- >>> networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config()
+ >>> client.api.create_network('network1')
+ >>> networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config()
})
- >>> container = docker_client.create_container(
+ >>> container = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -609,24 +630,27 @@ class ContainerApiMixin(object):
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
- links (:py:class:`list`): A list of links for this endpoint.
- Containers declared in this list will be linked to this
- container. Defaults to ``None``.
+ links (dict): Mapping of links for this endpoint using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to this
+ container using the provided alias. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
Returns:
(dict) An endpoint config.
Example:
- >>> endpoint_config = client.create_endpoint_config(
+ >>> endpoint_config = client.api.create_endpoint_config(
aliases=['web', 'app'],
- links=['app_db'],
+ links={'app_db': 'db', 'another': None},
ipv4_address='132.65.0.123'
)
@@ -676,7 +700,8 @@ class ContainerApiMixin(object):
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('container')
- def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
@@ -687,6 +712,8 @@ class ContainerApiMixin(object):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -695,12 +722,29 @@ class ContainerApiMixin(object):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+
+ Example:
+
+ >>> c = docker.APIClient()
+ >>> f = open('./sh_bin.tar', 'wb')
+            >>> bits, stat = c.get_archive(container, '/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
"""
params = {
'path': path
}
+ headers = {
+ "Accept-Encoding": "gzip, deflate"
+ } if encode_stream else {
+ "Accept-Encoding": "identity"
+ }
url = self._url('/containers/{0}/archive', container)
- res = self._get(url, params=params, stream=True)
+ res = self._get(url, params=params, stream=True, headers=headers)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
@@ -744,7 +788,7 @@ class ContainerApiMixin(object):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
- if not isinstance(signal, six.string_types):
+ if not isinstance(signal, str):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
@@ -763,16 +807,16 @@ class ContainerApiMixin(object):
Args:
container (str): The container to get logs from
- stdout (bool): Get ``STDOUT``
- stderr (bool): Get ``STDERR``
- stream (bool): Stream the response
- timestamps (bool): Show timestamps
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
- follow (bool): Follow log output
+ follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
@@ -870,7 +914,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- >>> cli.port('7174d6347063', 80)
+ >>> client.api.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
"""
res = self._get(self._url("/containers/{0}/json", container))
@@ -888,9 +932,10 @@ class ContainerApiMixin(object):
if '/' in private_port:
return port_settings.get(private_port)
- h_ports = port_settings.get(private_port + '/tcp')
- if h_ports is None:
- h_ports = port_settings.get(private_port + '/udp')
+ for protocol in ['tcp', 'udp', 'sctp']:
+ h_ports = port_settings.get(private_port + '/' + protocol)
+ if h_ports:
+ break
return h_ports
@@ -1048,10 +1093,10 @@ class ContainerApiMixin(object):
Example:
- >>> container = cli.create_container(
+ >>> container = client.api.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
- >>> cli.start(container=container.get('Id'))
+ >>> client.api.start(container=container.get('Id'))
"""
if args or kwargs:
raise errors.DeprecatedMethod(
@@ -1072,7 +1117,8 @@ class ContainerApiMixin(object):
Args:
container (str): The container to stream statistics from
decode (bool): If set to true, stream will be decoded into dicts
- on the fly. False by default.
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
@@ -1086,6 +1132,10 @@ class ContainerApiMixin(object):
return self._stream_helper(self._get(url, stream=True),
decode=decode)
else:
+ if decode:
+ raise errors.InvalidArgument(
+ "decode is only available in conjunction with stream=True"
+ )
return self._result(self._get(url, params={'stream': False}),
json=True)
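The new guard makes the ``decode``/``stream`` contract explicit; a sketch (the container name is a placeholder, and the key layout follows the daemon's stats JSON):

    import docker

    client = docker.APIClient()

    # Streaming: decode=True turns each JSON line into a dict.
    for stat in client.stats('my-container', stream=True, decode=True):
        print(stat['read'])

    # One-shot: stream=False returns a single dict, and passing
    # decode=True alongside it now raises docker.errors.InvalidArgument.
    snapshot = client.stats('my-container', stream=False)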
@@ -1170,8 +1220,8 @@ class ContainerApiMixin(object):
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
- mem_limit (int or str): Memory limit
- mem_reservation (int or str): Memory soft limit
+ mem_limit (float or str): Memory limit
+ mem_reservation (float or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 76a94cf..a857213 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils
-class DaemonApiMixin(object):
+class DaemonApiMixin:
@utils.minimum_version('1.25')
def df(self):
"""
@@ -42,8 +42,8 @@ class DaemonApiMixin(object):
Example:
- >>> for event in client.events()
- ... print event
+            >>> for event in client.events(decode=True):
+ ... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
@@ -54,7 +54,7 @@ class DaemonApiMixin(object):
>>> events = client.events()
>>> for event in events:
- ... print event
+ ... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
@@ -109,7 +109,7 @@ class DaemonApiMixin(object):
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
- otherwise``$HOME/.dockercfg``)
+ otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
@@ -124,13 +124,15 @@ class DaemonApiMixin(object):
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
- self._auth_configs = auth.load_config(dockercfg_path)
- elif not self._auth_configs:
- self._auth_configs = auth.load_config()
-
- authcfg = auth.resolve_authconfig(
- self._auth_configs, registry, credstore_env=self.credstore_env,
- )
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+ elif not self._auth_configs or self._auth_configs.is_empty:
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
@@ -146,9 +148,7 @@ class DaemonApiMixin(object):
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- if 'auths' not in self._auth_configs:
- self._auth_configs['auths'] = {}
- self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
+ self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 986d87f..496308a 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -1,10 +1,8 @@
-import six
-
from .. import errors
from .. import utils
-class ExecApiMixin(object):
+class ExecApiMixin:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
@@ -45,7 +43,7 @@ class ExecApiMixin(object):
'Setting environment for exec is not supported in API < 1.25'
)
- if isinstance(cmd, six.string_types):
+ if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
@@ -118,7 +116,7 @@ class ExecApiMixin(object):
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
- socket=False):
+ socket=False, demux=False):
"""
Start a previously set up exec instance.
@@ -130,11 +128,15 @@ class ExecApiMixin(object):
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations.
+ demux (bool): Return stdout and stderr separately
Returns:
- (generator or str): If ``stream=True``, a generator yielding
- response chunks. If ``socket=True``, a socket object for the
- connection. A string containing response data otherwise.
+
+ (generator or str or tuple): If ``stream=True``, a generator
+ yielding response chunks. If ``socket=True``, a socket object for
+ the connection. A string containing response data otherwise. If
+            ``demux=True``, a tuple of two byte strings: stdout and
+            stderr.
Raises:
:py:class:`docker.errors.APIError`
@@ -162,4 +164,4 @@ class ExecApiMixin(object):
return self._result(res)
if socket:
return self._get_raw_response_socket(res)
- return self._read_from_socket(res, stream, tty)
+ return self._read_from_socket(res, stream, tty=tty, demux=demux)
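A sketch of ``demux`` on the exec path (the container name is a placeholder; with ``demux=True``, ``stream=False``, and no TTY, the result is a ``(stdout, stderr)`` tuple of byte strings):

    import docker

    client = docker.APIClient()
    exec_id = client.exec_create(
        'my-container', ['sh', '-c', 'echo out; echo err >&2']
    )
    stdout, stderr = client.exec_start(exec_id, demux=True)
    # stdout == b'out\n', stderr == b'err\n'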
diff --git a/docker/api/image.py b/docker/api/image.py
index 5f05d88..772d889 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,15 +1,13 @@
import logging
import os
-import six
-
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
-class ImageApiMixin(object):
+class ImageApiMixin:
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@@ -31,8 +29,8 @@ class ImageApiMixin(object):
Example:
- >>> image = cli.get_image("busybox:latest")
- >>> f = open('/tmp/busybox-latest.tar', 'w')
+ >>> image = client.api.get_image("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
>>> f.close()
@@ -70,7 +68,8 @@ class ImageApiMixin(object):
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- - ``label`` (str): format either ``key`` or ``key=value``
+                - ``label`` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
@@ -80,10 +79,18 @@ class ImageApiMixin(object):
If the server returns an error.
"""
params = {
- 'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
+ if name:
+ if utils.version_lt(self._version, '1.25'):
+ # only use "filter" on API 1.24 and under, as it is deprecated
+ params['filter'] = name
+ else:
+ if filters:
+ filters['reference'] = name
+ else:
+ filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
@@ -121,7 +128,7 @@ class ImageApiMixin(object):
params = _import_image_params(
repository, tag, image,
- src=(src if isinstance(src, six.string_types) else None),
+ src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
@@ -130,7 +137,7 @@ class ImageApiMixin(object):
return self._result(
self._post(u, data=None, params=params)
)
- elif isinstance(src, six.string_types): # from file path
+ elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
@@ -247,12 +254,15 @@ class ImageApiMixin(object):
@utils.minimum_version('1.30')
@utils.check_resource('image')
- def inspect_distribution(self, image):
+ def inspect_distribution(self, image, auth_config=None):
"""
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
@@ -261,9 +271,21 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ registry, _ = auth.resolve_repository_name(image)
+
+ headers = {}
+ if auth_config is None:
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ else:
+ log.debug('Sending supplied auth config')
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
+
+ url = self._url("/distribution/{0}/json", image)
return self._result(
- self._get(self._url("/distribution/{0}/json", image)), True
+ self._get(url, headers=headers), True
)
def load_image(self, data, quiet=None):
@@ -327,21 +349,24 @@ class ImageApiMixin(object):
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
- decode=False, platform=None):
+ decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
- tag (str): The tag to pull
- stream (bool): Stream the output as a generator
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
+ is set to ``latest``.
+ stream (bool): Stream the output as a generator. Make sure to
+ consume the generator, otherwise pull might get cancelled.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
+            all_tags (bool): Pull all image tags; the ``tag`` parameter is
+ ignored.
Returns:
(generator or str): The output
@@ -352,8 +377,8 @@ class ImageApiMixin(object):
Example:
- >>> for line in cli.pull('busybox', stream=True):
- ... print(json.dumps(json.loads(line), indent=4))
+ >>> for line in client.api.pull('busybox', stream=True, decode=True):
+ ... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
@@ -366,8 +391,12 @@ class ImageApiMixin(object):
}
"""
- if not tag:
- repository, tag = utils.parse_repository_tag(repository)
+ repository, image_tag = utils.parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
+
+ if all_tags:
+ tag = None
+
registry, repo_name = auth.resolve_repository_name(repository)
params = {
@@ -413,10 +442,9 @@ class ImageApiMixin(object):
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
@@ -428,12 +456,12 @@ class ImageApiMixin(object):
If the server returns an error.
Example:
- >>> for line in cli.push('yourname/app', stream=True):
- ... print line
- {"status":"Pushing repository yourname/app (1 tags)"}
- {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
- {"status":"Image already pushed, skipping","progressDetail":{},
- "id":"511136ea3c5a"}
+ >>> for line in client.api.push('yourname/app', stream=True, decode=True):
+ ... print(line)
+ {'status': 'Pushing repository yourname/app (1 tags)'}
+            {'status': 'Pushing', 'progressDetail': {}, 'id': '511136ea3c5a'}
+            {'status': 'Image already pushed, skipping', 'progressDetail': {},
+ 'id': '511136ea3c5a'}
...
"""
@@ -479,13 +507,14 @@ class ImageApiMixin(object):
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
- def search(self, term):
+ def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
+ limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
@@ -494,8 +523,12 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ params = {'term': term}
+ if limit is not None:
+ params['limit'] = limit
+
return self._result(
- self._get(self._url("/images/search"), params={'term': term}),
+ self._get(self._url("/images/search"), params=params),
True
)
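Usage of the new ``limit`` parameter, assuming network access to Docker Hub:

    import docker

    client = docker.APIClient()
    for entry in client.search('ubuntu', limit=5):
        print(entry['name'], entry['star_count'])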
@@ -519,7 +552,7 @@ class ImageApiMixin(object):
Example:
- >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
@@ -536,7 +569,7 @@ class ImageApiMixin(object):
def is_file(src):
try:
return (
- isinstance(src, six.string_types) and
+ isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
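The reworked tag handling in ``pull`` can be summarized from the caller's side as follows (repository and tags are illustrative):

    import docker

    client = docker.APIClient()

    client.pull('busybox')                  # tag defaults to 'latest'
    client.pull('busybox:1.31')             # tag parsed from the reference
    client.pull('busybox', tag='1.31')      # an explicit tag wins
    client.pull('busybox', all_tags=True)   # tag is ignored; all tags pulled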
diff --git a/docker/api/network.py b/docker/api/network.py
index 57ed8d3..e95c5fc 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -4,10 +4,10 @@ from ..utils import version_lt
from .. import utils
-class NetworkApiMixin(object):
+class NetworkApiMixin:
def networks(self, names=None, ids=None, filters=None):
"""
- List networks. Similar to the ``docker networks ls`` command.
+ List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
@@ -15,7 +15,8 @@ class NetworkApiMixin(object):
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
+ such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
@@ -74,7 +75,7 @@ class NetworkApiMixin(object):
Example:
A network using the bridge driver:
- >>> client.create_network("network1", driver="bridge")
+ >>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
@@ -89,7 +90,7 @@ class NetworkApiMixin(object):
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
- >>> docker_client.create_network("network1", driver="bridge",
+ >>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
@@ -215,7 +216,7 @@ class NetworkApiMixin(object):
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
- link_local_ips=None):
+ link_local_ips=None, driver_opt=None):
"""
Connect a container to a network.
@@ -239,7 +240,8 @@ class NetworkApiMixin(object):
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
- ipv6_address=ipv6_address, link_local_ips=link_local_ips
+ ipv6_address=ipv6_address, link_local_ips=link_local_ips,
+ driver_opt=driver_opt
),
}
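A sketch of the new ``driver_opt`` pass-through (container, network, and option key are illustrative; valid keys depend on the network driver in use):

    import docker

    client = docker.APIClient()
    client.connect_container_to_network(
        'my-container', 'my-network',
        driver_opt={'com.example.network.opt': 'value'},  # hypothetical key
    )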
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
index f6c0b13..57110f1 100644
--- a/docker/api/plugin.py
+++ b/docker/api/plugin.py
@@ -1,9 +1,7 @@
-import six
-
from .. import auth, utils
-class PluginApiMixin(object):
+class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
@@ -21,7 +19,7 @@ class PluginApiMixin(object):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
- data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
diff --git a/docker/api/secret.py b/docker/api/secret.py
index fa4c2ab..cd440b9 100644
--- a/docker/api/secret.py
+++ b/docker/api/secret.py
@@ -1,12 +1,10 @@
import base64
-import six
-
from .. import errors
from .. import utils
-class SecretApiMixin(object):
+class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
@@ -25,8 +23,7 @@ class SecretApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
@@ -53,7 +50,7 @@ class SecretApiMixin(object):
Retrieve secret metadata
Args:
- id (string): Full ID of the secret to remove
+ id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
diff --git a/docker/api/service.py b/docker/api/service.py
index 03b0ca6..371f541 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -2,7 +2,8 @@ from .. import auth, errors, utils
from ..types import ServiceMode
-def _check_api_features(version, task_template, update_config, endpoint_spec):
+def _check_api_features(version, task_template, update_config, endpoint_spec,
+ rollback_config):
def raise_version_error(param, min_version):
raise errors.InvalidVersion(
@@ -18,10 +19,24 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if 'Monitor' in update_config:
raise_version_error('UpdateConfig.monitor', '1.25')
+ if utils.version_lt(version, '1.28'):
+ if update_config.get('FailureAction') == 'rollback':
+ raise_version_error(
+ 'UpdateConfig.failure_action rollback', '1.28'
+ )
+
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('UpdateConfig.order', '1.29')
+ if rollback_config is not None:
+ if utils.version_lt(version, '1.28'):
+ raise_version_error('rollback_config', '1.28')
+
+ if utils.version_lt(version, '1.29'):
+            if 'Order' in rollback_config:
+ raise_version_error('RollbackConfig.order', '1.29')
+
if endpoint_spec is not None:
if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
@@ -30,7 +45,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
- raise_version_error('force_update', '1.25')
+ raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
@@ -73,6 +88,10 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35')
+ if utils.version_lt(version, '1.38'):
+ if container_spec.get('Init') is not None:
+ raise_version_error('ContainerSpec.init', '1.38')
+
if task_template.get('Resources'):
if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'):
@@ -94,12 +113,12 @@ def _merge_task_template(current, override):
return merged
-class ServiceApiMixin(object):
+class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
- endpoint_spec=None
+ endpoint_spec=None, rollback_config=None
):
"""
Create a service.
@@ -114,8 +133,11 @@ class ServiceApiMixin(object):
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
- networks (:py:class:`list`): List of network names or IDs to attach
- the service to. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
@@ -129,7 +151,8 @@ class ServiceApiMixin(object):
"""
_check_api_features(
- self._version, task_template, update_config, endpoint_spec
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
)
url = self._url('/services/create')
@@ -160,6 +183,9 @@ class ServiceApiMixin(object):
if update_config is not None:
data['UpdateConfig'] = update_config
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+
return self._result(
self._post_json(url, data=data, headers=headers), True
)
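
A hedged usage sketch for the new rollback_config parameter, assuming the RollbackConfig type that ships alongside this change (it accepts the same fields as UpdateConfig); the service name and image are placeholders:

    import docker
    from docker.types import ContainerSpec, RollbackConfig, TaskTemplate

    client = docker.APIClient(base_url='unix:///var/run/docker.sock')
    task = TaskTemplate(ContainerSpec(image='nginx:alpine'))
    rollback = RollbackConfig(parallelism=1,
                              order='start-first')  # order needs API >= 1.29
    svc = client.create_service(task, name='web', rollback_config=rollback)
    print(svc['ID'])
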
@@ -176,7 +202,8 @@ class ServiceApiMixin(object):
into the service inspect output.
Returns:
- ``True`` if successful.
+ (dict): A dictionary of the server-side representation of the
+ service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
@@ -336,7 +363,8 @@ class ServiceApiMixin(object):
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
- endpoint_spec=None, fetch_current_spec=False):
+ endpoint_spec=None, fetch_current_spec=False,
+ rollback_config=None):
"""
Update a service.
@@ -354,15 +382,18 @@ class ServiceApiMixin(object):
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
- networks (:py:class:`list`): List of network names or IDs to attach
- the service to. Default: ``None``.
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False``
Returns:
- ``True`` if successful.
+ A dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
@@ -370,7 +401,8 @@ class ServiceApiMixin(object):
"""
_check_api_features(
- self._version, task_template, update_config, endpoint_spec
+ self._version, task_template, update_config, endpoint_spec,
+ rollback_config
)
if fetch_current_spec:
@@ -416,6 +448,11 @@ class ServiceApiMixin(object):
else:
data['UpdateConfig'] = current.get('UpdateConfig')
+ if rollback_config is not None:
+ data['RollbackConfig'] = rollback_config
+ else:
+ data['RollbackConfig'] = current.get('RollbackConfig')
+
if networks is not None:
converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'):
@@ -440,5 +477,4 @@ class ServiceApiMixin(object):
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
- self._raise_for_status(resp)
- return True
+ return self._result(resp, json=True)
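
Since update_service now returns the decoded response instead of True, callers can surface engine warnings directly. A minimal sketch against a hypothetical existing service named 'web':

    import docker

    client = docker.APIClient(base_url='unix:///var/run/docker.sock')
    spec = client.inspect_service('web', insert_defaults=True)
    result = client.update_service(
        'web',
        version=spec['Version']['Index'],   # required optimistic-lock version
        fetch_current_spec=True,
        labels={'tier': 'frontend'},
    )
    print(result.get('Warnings'))  # e.g. image resolution warnings, or None
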
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 04595da..db40fdd 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,5 +1,6 @@
import logging
-from six.moves import http_client
+import http.client as http_client
+from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
from .. import utils
@@ -7,7 +8,7 @@ from .. import utils
log = logging.getLogger(__name__)
-class SwarmApiMixin(object):
+class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
@@ -57,10 +58,10 @@ class SwarmApiMixin(object):
Example:
- >>> spec = client.create_swarm_spec(
+ >>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
- >>> client.init_swarm(
+ >>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
@@ -82,7 +83,9 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, swarm_spec=None):
+ force_new_cluster=False, swarm_spec=None,
+ default_addr_pool=None, subnet_size=None,
+ data_path_addr=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
@@ -107,9 +110,17 @@ class SwarmApiMixin(object):
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
+ default_addr_pool (list of strings): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
- ``True`` if successful.
+ (str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
@@ -119,15 +130,44 @@ class SwarmApiMixin(object):
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
+
+ if default_addr_pool is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Address pool is only available for API version >= 1.39'
+ )
+        # the engine zeroes subnet_size if it is unset alongside
+        # default_addr_pool, so apply the client-side default
+ if subnet_size is None:
+ subnet_size = DEFAULT_SWARM_SUBNET_SIZE
+
+ if subnet_size is not None:
+ if utils.version_lt(self._version, '1.39'):
+ raise errors.InvalidVersion(
+ 'Subnet size is only available for API version >= 1.39'
+ )
+        # subnet_size has no effect without default_addr_pool,
+        # so apply the default pool as well
+ if default_addr_pool is None:
+ default_addr_pool = DEFAULT_SWARM_ADDR_POOL
+
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
+ 'DefaultAddrPool': default_addr_pool,
+ 'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
response = self._post_json(url, data=data)
- self._raise_for_status(response)
- return True
+ return self._result(response, json=True)
@utils.minimum_version('1.24')
def inspect_swarm(self):
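
A sketch of the new init_swarm parameters; default_addr_pool and subnet_size require API 1.39 and data_path_addr requires 1.30, as enforced above. The addresses and interface names are placeholders:

    import docker

    client = docker.APIClient(base_url='unix:///var/run/docker.sock')
    node_id = client.init_swarm(
        advertise_addr='eth0',
        default_addr_pool=['10.20.0.0/16'],  # CIDR pools for overlay networks
        subnet_size=24,                      # size of subnets carved from the pool
        data_path_addr='eth0',
    )
    print(node_id)  # init now returns the new node's ID instead of True
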
@@ -165,7 +205,7 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
- advertise_addr=None):
+ advertise_addr=None, data_path_addr=None):
"""
Make this Engine join a swarm that has already been created.
@@ -176,7 +216,7 @@ class SwarmApiMixin(object):
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
- Endpoint (VTEP). Default: ``None``
+                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
@@ -184,6 +224,8 @@ class SwarmApiMixin(object):
the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None``
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
``True`` if the request went through.
@@ -193,11 +235,20 @@ class SwarmApiMixin(object):
If the server returns an error.
"""
data = {
- "RemoteAddrs": remote_addrs,
- "ListenAddr": listen_addr,
- "JoinToken": join_token,
- "AdvertiseAddr": advertise_addr,
+ 'RemoteAddrs': remote_addrs,
+ 'ListenAddr': listen_addr,
+ 'JoinToken': join_token,
+ 'AdvertiseAddr': advertise_addr,
}
+
+ if data_path_addr is not None:
+ if utils.version_lt(self._version, '1.30'):
+ raise errors.InvalidVersion(
+ 'Data address path is only available for '
+ 'API version >= 1.30'
+ )
+ data['DataPathAddr'] = data_path_addr
+
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
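
Joining with the new data_path_addr parameter might look like the sketch below, where manager_client and worker_client are assumed APIClient instances pointed at two different engines and the addresses are placeholders:

    # on a manager node
    token = manager_client.inspect_swarm()['JoinTokens']['Worker']

    # on the joining engine
    worker_client.join_swarm(
        remote_addrs=['10.0.0.5:2377'],
        join_token=token,
        data_path_addr='eth1',   # requires API >= 1.30
    )
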
@@ -303,8 +354,8 @@ class SwarmApiMixin(object):
Example:
- >>> key = client.get_unlock_key()
- >>> client.unlock_node(key)
+ >>> key = client.api.get_unlock_key()
+ >>> client.unlock_swarm(key)
"""
if isinstance(key, dict):
@@ -345,7 +396,7 @@ class SwarmApiMixin(object):
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
- >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""
@@ -355,8 +406,10 @@ class SwarmApiMixin(object):
return True
@utils.minimum_version('1.24')
- def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
- rotate_manager_token=False):
+ def update_swarm(self, version, swarm_spec=None,
+ rotate_worker_token=False,
+ rotate_manager_token=False,
+ rotate_manager_unlock_key=False):
"""
Update the Swarm's configuration
@@ -370,6 +423,8 @@ class SwarmApiMixin(object):
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
Returns:
``True`` if the request went through.
@@ -378,12 +433,20 @@ class SwarmApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
-
url = self._url('/swarm/update')
- response = self._post_json(url, data=swarm_spec, params={
+ params = {
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
- })
+ }
+ if rotate_manager_unlock_key:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'Rotate manager unlock key '
+ 'is only available for API version >= 1.25'
+ )
+ params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
+
+ response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response)
return True
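
Rotating the manager unlock key via the new flag, sketched against a manager with autolock enabled (client is an assumed APIClient on that manager):

    swarm = client.inspect_swarm()
    client.update_swarm(
        version=swarm['Version']['Index'],
        rotate_manager_unlock_key=True,   # requires API >= 1.25
    )
    print(client.get_unlock_key()['UnlockKey'])  # the freshly rotated key
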
diff --git a/docker/api/volume.py b/docker/api/volume.py
index 900a608..86b0018 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -2,7 +2,7 @@ from .. import errors
from .. import utils
-class VolumeApiMixin(object):
+class VolumeApiMixin:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
@@ -21,7 +21,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.volumes()
+ >>> client.api.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
@@ -56,7 +56,7 @@ class VolumeApiMixin(object):
Example:
- >>> volume = cli.create_volume(name='foobar', driver='local',
+ >>> volume = client.api.create_volume(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
>>> print(volume)
@@ -104,7 +104,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.inspect_volume('foobar')
+ >>> client.api.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
diff --git a/docker/auth.py b/docker/auth.py
index 9635f93..4fa798f 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -2,14 +2,12 @@ import base64
import json
import logging
-import dockerpycreds
-import six
-
+from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -18,13 +16,13 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
- 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
- 'Invalid index name ({0}). Cannot begin or end with a'
+ 'Invalid index name ({}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
@@ -39,11 +37,11 @@ def resolve_index_name(index_name):
def get_config_header(client, registry):
log.debug('Looking for auth config')
- if not client._auth_configs:
+ if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
- client._auth_configs = load_config()
+ client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
@@ -70,81 +68,258 @@ def split_repo_name(repo_name):
def get_credential_store(authconfig, registry):
- if not registry or registry == INDEX_NAME:
- registry = 'https://index.docker.io/v1/'
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig)
+ return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+ def __init__(self, dct, credstore_env=None):
+ if 'auths' not in dct:
+ dct['auths'] = {}
+ self.update(dct)
+ self._credstore_env = credstore_env
+ self._stores = {}
+
+ @classmethod
+ def parse_auth(cls, entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ conf = {}
+ for registry, entry in entries.items():
+ if not isinstance(entry, dict):
+ log.debug(
+ 'Config entry for key {} is not auth config'.format(
+ registry
+ )
+ )
+ # We sometimes fall back to parsing the whole config as if it
+ # was the auth config by itself, for legacy purposes. In that
+ # case, we fail silently and return an empty conf if any of the
+ # keys is not formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {}'.format(
+ registry
+ )
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug(
+ 'Found an IdentityToken entry for registry {}'.format(
+ registry
+ )
+ )
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug(
+ 'Auth data for {} is absent. Client might be using a '
+ 'credentials store instead.'.format(registry)
+ )
+ conf[registry] = {}
+ continue
- return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
- 'credsStore'
- )
+ username, password = decode_auth(entry['auth'])
+ log.debug(
+ 'Found entry (registry={}, username={})'
+ .format(repr(registry), repr(username))
+ )
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+ @classmethod
+ def load_config(cls, config_path, config_dict, credstore_env=None):
+ """
+ Loads authentication data from a Docker configuration file in the given
+ root directory or if config_path is passed use given path.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment
+ variable > ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return cls({}, credstore_env)
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return cls(_load_legacy_config(config_file), credstore_env)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': cls.parse_auth(
+ config_dict.pop('auths'), raise_on_error=True
+ )
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return cls(res, credstore_env)
-def resolve_authconfig(authconfig, registry=None, credstore_env=None):
- """
- Returns the authentication data from the given auth configuration for a
- specific registry. As with the Docker client, legacy entries in the config
- with full URLs are stripped down to hostnames before checking for a match.
- Returns None if no match was found.
- """
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret "
+ "as auth-only file"
+ )
+ return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
- if 'credHelpers' in authconfig or 'credsStore' in authconfig:
- store_name = get_credential_store(authconfig, registry)
- if store_name is not None:
- log.debug(
- 'Using credentials store "{0}"'.format(store_name)
- )
- cfg = _resolve_authconfig_credstore(
- authconfig, registry, store_name, env=credstore_env
- )
- if cfg is not None:
- return cfg
- log.debug('No entry in credstore - fetching from auth dict')
+ @property
+ def auths(self):
+ return self.get('auths', {})
- # Default to the public index server
- registry = resolve_index_name(registry) if registry else INDEX_NAME
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ @property
+ def creds_store(self):
+ return self.get('credsStore', None)
- authdict = authconfig.get('auths', {})
- if registry in authdict:
- log.debug("Found {0}".format(repr(registry)))
- return authdict[registry]
+ @property
+ def cred_helpers(self):
+ return self.get('credHelpers', {})
- for key, conf in six.iteritems(authdict):
- if resolve_index_name(key) == registry:
- log.debug("Found {0}".format(repr(key)))
- return conf
+ @property
+ def is_empty(self):
+ return (
+ not self.auths and not self.creds_store and not self.cred_helpers
+ )
- log.debug("No entry found")
- return None
+ def resolve_authconfig(self, registry=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the
+ config with full URLs are stripped down to hostnames before checking
+ for a match. Returns None if no match was found.
+ """
+
+ if self.creds_store or self.cred_helpers:
+ store_name = self.get_credential_store(registry)
+ if store_name is not None:
+ log.debug(
+ f'Using credentials store "{store_name}"'
+ )
+ cfg = self._resolve_authconfig_credstore(registry, store_name)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug(f"Looking for auth entry for {repr(registry)}")
-def _resolve_authconfig_credstore(authconfig, registry, credstore_name,
- env=None):
- if not registry or registry == INDEX_NAME:
- # The ecosystem is a little schizophrenic with index.docker.io VS
- # docker.io - in that case, it seems the full URL is necessary.
- registry = INDEX_URL
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
- store = dockerpycreds.Store(credstore_name, environment=env)
- try:
- data = store.get(registry)
- res = {
- 'ServerAddress': registry,
- }
- if data['Username'] == TOKEN_USERNAME:
- res['IdentityToken'] = data['Secret']
- else:
- res.update({
- 'Username': data['Username'],
- 'Password': data['Secret'],
- })
- return res
- except dockerpycreds.CredentialsNotFound as e:
- log.debug('No entry found')
+ if registry in self.auths:
+ log.debug(f"Found {repr(registry)}")
+ return self.auths[registry]
+
+ for key, conf in self.auths.items():
+ if resolve_index_name(key) == registry:
+ log.debug(f"Found {repr(key)}")
+ return conf
+
+ log.debug("No entry found")
return None
- except dockerpycreds.StoreError as e:
- raise errors.DockerException(
- 'Credentials store error: {0}'.format(repr(e))
- )
+
+ def _resolve_authconfig_credstore(self, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+            # The ecosystem is a little inconsistent across index.docker.io
+            # vs docker.io - in that case, it seems the full URL is necessary.
+ registry = INDEX_URL
+ log.debug(f"Looking for auth entry for {repr(registry)}")
+ store = self._get_store_instance(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except credentials.CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except credentials.StoreError as e:
+ raise errors.DockerException(
+ f'Credentials store error: {repr(e)}'
+ )
+
+ def _get_store_instance(self, name):
+ if name not in self._stores:
+ self._stores[name] = credentials.Store(
+ name, environment=self._credstore_env
+ )
+ return self._stores[name]
+
+ def get_credential_store(self, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = INDEX_URL
+
+ return self.cred_helpers.get(registry) or self.creds_store
+
+ def get_all_credentials(self):
+ auth_data = self.auths.copy()
+ if self.creds_store:
+ # Retrieve all credentials from the default store
+ store = self._get_store_instance(self.creds_store)
+ for k in store.list().keys():
+ auth_data[k] = self._resolve_authconfig_credstore(
+ k, self.creds_store
+ )
+ auth_data[convert_to_hostname(k)] = auth_data[k]
+
+ # credHelpers entries take priority over all others
+ for reg, store_name in self.cred_helpers.items():
+ auth_data[reg] = self._resolve_authconfig_credstore(
+ reg, store_name
+ )
+ auth_data[convert_to_hostname(reg)] = auth_data[reg]
+
+ return auth_data
+
+ def add_auth(self, reg, data):
+ self['auths'][reg] = data
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig, credstore_env)
+ return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url):
@@ -152,7 +327,7 @@ def convert_to_hostname(url):
def decode_auth(auth):
- if isinstance(auth, six.string_types):
+ if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
@@ -177,100 +352,11 @@ def parse_auth(entries, raise_on_error=False):
Authentication registry.
"""
- conf = {}
- for registry, entry in six.iteritems(entries):
- if not isinstance(entry, dict):
- log.debug(
- 'Config entry for key {0} is not auth config'.format(registry)
- )
- # We sometimes fall back to parsing the whole config as if it was
- # the auth config by itself, for legacy purposes. In that case, we
- # fail silently and return an empty conf if any of the keys is not
- # formatted properly.
- if raise_on_error:
- raise errors.InvalidConfigFile(
- 'Invalid configuration for registry {0}'.format(registry)
- )
- return {}
- if 'identitytoken' in entry:
- log.debug('Found an IdentityToken entry for registry {0}'.format(
- registry
- ))
- conf[registry] = {
- 'IdentityToken': entry['identitytoken']
- }
- continue # Other values are irrelevant if we have a token, skip.
-
- if 'auth' not in entry:
- # Starting with engine v1.11 (API 1.23), an empty dictionary is
- # a valid value in the auths config.
- # https://github.com/docker/compose/issues/3265
- log.debug(
- 'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'.format(registry)
- )
- conf[registry] = {}
- continue
-
- username, password = decode_auth(entry['auth'])
- log.debug(
- 'Found entry (registry={0}, username={1})'
- .format(repr(registry), repr(username))
- )
-
- conf[registry] = {
- 'username': username,
- 'password': password,
- 'email': entry.get('email'),
- 'serveraddress': registry,
- }
- return conf
-
-
-def load_config(config_path=None, config_dict=None):
- """
- Loads authentication data from a Docker configuration file in the given
- root directory or if config_path is passed use given path.
- Lookup priority:
- explicit config_path parameter > DOCKER_CONFIG environment variable >
- ~/.docker/config.json > ~/.dockercfg
- """
+ return AuthConfig.parse_auth(entries, raise_on_error)
- if not config_dict:
- config_file = config.find_config_file(config_path)
- if not config_file:
- return {}
- try:
- with open(config_file) as f:
- config_dict = json.load(f)
- except (IOError, KeyError, ValueError) as e:
- # Likely missing new Docker config file or it's in an
- # unknown format, continue to attempt to read old location
- # and format.
- log.debug(e)
- return _load_legacy_config(config_file)
-
- res = {}
- if config_dict.get('auths'):
- log.debug("Found 'auths' section")
- res.update({
- 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)
- })
- if config_dict.get('credsStore'):
- log.debug("Found 'credsStore' section")
- res.update({'credsStore': config_dict.pop('credsStore')})
- if config_dict.get('credHelpers'):
- log.debug("Found 'credHelpers' section")
- res.update({'credHelpers': config_dict.pop('credHelpers')})
- if res:
- return res
-
- log.debug(
- "Couldn't find auth-related section ; attempting to interpret"
- "as auth-only file"
- )
- return {'auths': parse_auth(config_dict)}
+def load_config(config_path=None, config_dict=None, credstore_env=None):
+ return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
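
The refactor keeps the module-level helpers as thin shims, but load_config now hands back an AuthConfig (a dict subclass) whose methods replace the old free functions. A hedged round trip, assuming a local Docker config:

    from docker import auth

    cfg = auth.load_config()                   # AuthConfig, possibly empty
    print(cfg.is_empty)
    entry = cfg.resolve_authconfig('docker.io')
    if entry:
        # plain-file entries use lowercase keys; credstore hits use 'Username'
        print(entry.get('username') or entry.get('Username'))
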
diff --git a/docker/client.py b/docker/client.py
index 8d4a52b..4dbd846 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,5 +1,5 @@
from .api.client import APIClient
-from .constants import DEFAULT_TIMEOUT_SECONDS
+from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
@@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
-class DockerClient(object):
+class DockerClient:
"""
A client for communicating with a Docker server.
@@ -26,7 +26,7 @@ class DockerClient(object):
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
@@ -35,6 +35,11 @@ class DockerClient(object):
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@@ -62,14 +67,19 @@ class DockerClient(object):
Args:
version (str): The version of the API to use. Set to ``auto`` to
- automatically detect the server's version. Default: ``1.30``
+ automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is
+ made via shelling out to the ssh client. Ensure the ssh
+ client is installed and configured on the host.
Example:
@@ -80,9 +90,15 @@ class DockerClient(object):
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
+ max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
+ use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
- timeout=timeout, version=version, **kwargs_from_env(**kwargs)
+ timeout=timeout,
+ max_pool_size=max_pool_size,
+ version=version,
+ use_ssh_client=use_ssh_client,
+ **kwargs_from_env(**kwargs)
)
# Resources
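
from_env now forwards the pooling and SSH options explicitly; a minimal sketch against a locally reachable daemon:

    import docker

    # both kwargs are passed straight through to the underlying APIClient
    client = docker.from_env(max_pool_size=10, use_ssh_client=False)
    print(client.version()['ApiVersion'])
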
@@ -196,7 +212,7 @@ class DockerClient(object):
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
- s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ s = [f"'DockerClient' object has no attribute '{name}'"]
# If a user calls a method on APIClient, they
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
diff --git a/docker/constants.py b/docker/constants.py
index 7565a76..d5bfc35 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.35'
+DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@@ -9,12 +9,36 @@ CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
-DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
+DEFAULT_USER_AGENT = f"docker-sdk-python/{version}"
DEFAULT_NUM_POOLS = 25
+
+# The OpenSSH server default value for MaxSessions is 10 which means we can
+# use up to 9, leaving the final session for the underlying SSH connection.
+# For more details see: https://github.com/docker/docker-py/issues/2246
+DEFAULT_NUM_POOLS_SSH = 9
+
+DEFAULT_MAX_POOL_SIZE = 10
+
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
diff --git a/docker/context/__init__.py b/docker/context/__init__.py
new file mode 100644
index 0000000..0a6707f
--- /dev/null
+++ b/docker/context/__init__.py
@@ -0,0 +1,3 @@
+# flake8: noqa
+from .context import Context
+from .api import ContextAPI
diff --git a/docker/context/api.py b/docker/context/api.py
new file mode 100644
index 0000000..380e8c4
--- /dev/null
+++ b/docker/context/api.py
@@ -0,0 +1,203 @@
+import json
+import os
+
+from docker import errors
+from docker.context.config import get_meta_dir
+from docker.context.config import METAFILE
+from docker.context.config import get_current_context_name
+from docker.context.config import write_context_name_to_docker_config
+from docker.context import Context
+
+
+class ContextAPI:
+ """Context API.
+ Contains methods for context management:
+ create, list, remove, get, inspect.
+ """
+ DEFAULT_CONTEXT = Context("default", "swarm")
+
+ @classmethod
+ def create_context(
+ cls, name, orchestrator=None, host=None, tls_cfg=None,
+ default_namespace=None, skip_tls_verify=False):
+ """Creates a new context.
+ Returns:
+ (Context): a Context object.
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextAlreadyExists`
+ If a context with the name already exists.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.create_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ '"default" is a reserved context name')
+ ctx = Context.load_context(name)
+ if ctx:
+ raise errors.ContextAlreadyExists(name)
+ endpoint = "docker"
+ if orchestrator and orchestrator != "swarm":
+ endpoint = orchestrator
+ ctx = Context(name, orchestrator)
+ ctx.set_endpoint(
+ endpoint, host, tls_cfg,
+ skip_tls_verify=skip_tls_verify,
+ def_namespace=default_namespace)
+ ctx.save()
+ return ctx
+
+ @classmethod
+ def get_context(cls, name=None):
+ """Retrieves a context object.
+ Args:
+ name (str): The name of the context
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ctx = ContextAPI.get_context(name='test')
+ >>> print(ctx.Metadata)
+ {
+ "Name": "test",
+ "Metadata": {},
+ "Endpoints": {
+ "docker": {
+ "Host": "unix:///var/run/docker.sock",
+ "SkipTLSVerify": false
+ }
+ }
+ }
+ """
+ if not name:
+ name = get_current_context_name()
+ if name == "default":
+ return cls.DEFAULT_CONTEXT
+ return Context.load_context(name)
+
+ @classmethod
+ def contexts(cls):
+ """Context list.
+ Returns:
+ (Context): List of context objects.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ names = []
+ for dirname, dirnames, fnames in os.walk(get_meta_dir()):
+ for filename in fnames + dirnames:
+ if filename == METAFILE:
+ try:
+                    with open(os.path.join(dirname, filename)) as f:
+                        data = json.load(f)
+ names.append(data["Name"])
+ except Exception as e:
+ raise errors.ContextException(
+ "Failed to load metafile {}: {}".format(
+ filename, e))
+
+ contexts = [cls.DEFAULT_CONTEXT]
+ for name in names:
+ contexts.append(Context.load_context(name))
+ return contexts
+
+ @classmethod
+ def get_current_context(cls):
+ """Get current context.
+ Returns:
+ (Context): current context object.
+ """
+ return cls.get_context()
+
+ @classmethod
+ def set_current_context(cls, name="default"):
+ ctx = cls.get_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ err = write_context_name_to_docker_config(name)
+ if err:
+ raise errors.ContextException(
+ f'Failed to set current context: {err}')
+
+ @classmethod
+ def remove_context(cls, name):
+ """Remove a context. Similar to the ``docker context rm`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+ :py:class:`docker.errors.ContextException`
+ If name is default.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+ >>> ContextAPI.remove_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ raise errors.ContextException(
+ 'context "default" cannot be removed')
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+ if name == get_current_context_name():
+ write_context_name_to_docker_config(None)
+ ctx.remove()
+
+ @classmethod
+ def inspect_context(cls, name="default"):
+ """Remove a context. Similar to the ``docker context inspect`` command.
+
+ Args:
+ name (str): The name of the context
+
+ Raises:
+ :py:class:`docker.errors.MissingContextParameter`
+ If a context name is not provided.
+ :py:class:`docker.errors.ContextNotFound`
+ If a context with the name does not exist.
+
+ Example:
+
+ >>> from docker.context import ContextAPI
+            >>> ctx = ContextAPI.inspect_context(name='test')
+ >>>
+ """
+ if not name:
+ raise errors.MissingContextParameter("name")
+ if name == "default":
+ return cls.DEFAULT_CONTEXT()
+ ctx = Context.load_context(name)
+ if not ctx:
+ raise errors.ContextNotFound(name)
+
+ return ctx()
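
A hedged lifecycle sketch for the new ContextAPI; the SSH host is a placeholder, and setting the current context assumes an existing ~/.docker/config.json to write into:

    from docker.context import ContextAPI

    ctx = ContextAPI.create_context('remote', host='ssh://admin@10.0.0.5')
    ContextAPI.set_current_context('remote')
    print([c.Name for c in ContextAPI.contexts()])   # includes 'default'
    ContextAPI.set_current_context('default')
    ContextAPI.remove_context('remote')
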
diff --git a/docker/context/config.py b/docker/context/config.py
new file mode 100644
index 0000000..d761aef
--- /dev/null
+++ b/docker/context/config.py
@@ -0,0 +1,81 @@
+import os
+import json
+import hashlib
+
+from docker import utils
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.constants import DEFAULT_UNIX_SOCKET
+from docker.utils.config import find_config_file
+
+METAFILE = "meta.json"
+
+
+def get_current_context_name():
+ name = "default"
+ docker_cfg_path = find_config_file()
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ name = json.load(f).get("currentContext", "default")
+ except Exception:
+ return "default"
+ return name
+
+
+def write_context_name_to_docker_config(name=None):
+ if name == 'default':
+ name = None
+ docker_cfg_path = find_config_file()
+ config = {}
+ if docker_cfg_path:
+ try:
+ with open(docker_cfg_path) as f:
+ config = json.load(f)
+ except Exception as e:
+ return e
+ current_context = config.get("currentContext", None)
+ if current_context and not name:
+ del config["currentContext"]
+ elif name:
+ config["currentContext"] = name
+ else:
+ return
+ try:
+ with open(docker_cfg_path, "w") as f:
+ json.dump(config, f, indent=4)
+ except Exception as e:
+ return e
+
+
+def get_context_id(name):
+ return hashlib.sha256(name.encode('utf-8')).hexdigest()
+
+
+def get_context_dir():
+ return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
+
+
+def get_meta_dir(name=None):
+ meta_dir = os.path.join(get_context_dir(), "meta")
+ if name:
+ return os.path.join(meta_dir, get_context_id(name))
+ return meta_dir
+
+
+def get_meta_file(name):
+ return os.path.join(get_meta_dir(name), METAFILE)
+
+
+def get_tls_dir(name=None, endpoint=""):
+ context_dir = get_context_dir()
+ if name:
+ return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
+ return os.path.join(context_dir, "tls")
+
+
+def get_context_host(path=None, tls=False):
+ host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
+ if host == DEFAULT_UNIX_SOCKET:
+ # remove http+ from default docker socket url
+ return host.strip("http+")
+ return host
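
The helpers above address context metadata by a content hash of the name, so the on-disk layout is stable regardless of what characters the name contains. A small sketch:

    from docker.context.config import get_context_id, get_meta_file

    print(get_context_id('test'))  # sha256 hex digest of the name
    print(get_meta_file('test'))   # .../contexts/meta/<digest>/meta.json
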
diff --git a/docker/context/context.py b/docker/context/context.py
new file mode 100644
index 0000000..dbaa01c
--- /dev/null
+++ b/docker/context/context.py
@@ -0,0 +1,243 @@
+import os
+import json
+from shutil import copyfile, rmtree
+from docker.tls import TLSConfig
+from docker.errors import ContextException
+from docker.context.config import get_meta_dir
+from docker.context.config import get_meta_file
+from docker.context.config import get_tls_dir
+from docker.context.config import get_context_host
+
+
+class Context:
+ """A context."""
+
+ def __init__(self, name, orchestrator=None, host=None, endpoints=None,
+ tls=False):
+ if not name:
+ raise Exception("Name not provided")
+ self.name = name
+ self.context_type = None
+ self.orchestrator = orchestrator
+ self.endpoints = {}
+ self.tls_cfg = {}
+ self.meta_path = "IN MEMORY"
+ self.tls_path = "IN MEMORY"
+
+ if not endpoints:
+ # set default docker endpoint if no endpoint is set
+ default_endpoint = "docker" if (
+ not orchestrator or orchestrator == "swarm"
+ ) else orchestrator
+
+ self.endpoints = {
+ default_endpoint: {
+ "Host": get_context_host(host, tls),
+ "SkipTLSVerify": not tls
+ }
+ }
+ return
+
+ # check docker endpoints
+ for k, v in endpoints.items():
+ if not isinstance(v, dict):
+ # unknown format
+ raise ContextException("""Unknown endpoint format for
+ context {}: {}""".format(name, v))
+
+ self.endpoints[k] = v
+ if k != "docker":
+ continue
+
+ self.endpoints[k]["Host"] = v.get("Host", get_context_host(
+ host, tls))
+ self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
+ "SkipTLSVerify", not tls))
+
+ def set_endpoint(
+ self, name="docker", host=None, tls_cfg=None,
+ skip_tls_verify=False, def_namespace=None):
+ self.endpoints[name] = {
+ "Host": get_context_host(host, not skip_tls_verify),
+ "SkipTLSVerify": skip_tls_verify
+ }
+ if def_namespace:
+ self.endpoints[name]["DefaultNamespace"] = def_namespace
+
+ if tls_cfg:
+ self.tls_cfg[name] = tls_cfg
+
+ def inspect(self):
+ return self.__call__()
+
+ @classmethod
+ def load_context(cls, name):
+ meta = Context._load_meta(name)
+ if meta:
+ instance = cls(
+ meta["Name"],
+ orchestrator=meta["Metadata"].get("StackOrchestrator", None),
+ endpoints=meta.get("Endpoints", None))
+ instance.context_type = meta["Metadata"].get("Type", None)
+ instance._load_certs()
+ instance.meta_path = get_meta_dir(name)
+ return instance
+ return None
+
+ @classmethod
+ def _load_meta(cls, name):
+ meta_file = get_meta_file(name)
+ if not os.path.isfile(meta_file):
+ return None
+
+ metadata = {}
+ try:
+ with open(meta_file) as f:
+ metadata = json.load(f)
+ except (OSError, KeyError, ValueError) as e:
+ # unknown format
+ raise Exception("""Detected corrupted meta file for
+ context {} : {}""".format(name, e))
+
+ # for docker endpoints, set defaults for
+ # Host and SkipTLSVerify fields
+ for k, v in metadata["Endpoints"].items():
+ if k != "docker":
+ continue
+ metadata["Endpoints"][k]["Host"] = v.get(
+ "Host", get_context_host(None, False))
+ metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
+ v.get("SkipTLSVerify", True))
+
+ return metadata
+
+ def _load_certs(self):
+ certs = {}
+ tls_dir = get_tls_dir(self.name)
+ for endpoint in self.endpoints.keys():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ continue
+ ca_cert = None
+ cert = None
+ key = None
+ for filename in os.listdir(os.path.join(tls_dir, endpoint)):
+ if filename.startswith("ca"):
+ ca_cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("cert"):
+ cert = os.path.join(tls_dir, endpoint, filename)
+ elif filename.startswith("key"):
+ key = os.path.join(tls_dir, endpoint, filename)
+ if all([ca_cert, cert, key]):
+ verify = None
+ if endpoint == "docker" and not self.endpoints["docker"].get(
+ "SkipTLSVerify", False):
+ verify = True
+ certs[endpoint] = TLSConfig(
+ client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
+ self.tls_cfg = certs
+ self.tls_path = tls_dir
+
+ def save(self):
+ meta_dir = get_meta_dir(self.name)
+ if not os.path.isdir(meta_dir):
+ os.makedirs(meta_dir)
+ with open(get_meta_file(self.name), "w") as f:
+ f.write(json.dumps(self.Metadata))
+
+ tls_dir = get_tls_dir(self.name)
+ for endpoint, tls in self.tls_cfg.items():
+ if not os.path.isdir(os.path.join(tls_dir, endpoint)):
+ os.makedirs(os.path.join(tls_dir, endpoint))
+
+ ca_file = tls.ca_cert
+ if ca_file:
+ copyfile(ca_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(ca_file)))
+
+ if tls.cert:
+ cert_file, key_file = tls.cert
+ copyfile(cert_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(cert_file)))
+ copyfile(key_file, os.path.join(
+ tls_dir, endpoint, os.path.basename(key_file)))
+
+ self.meta_path = get_meta_dir(self.name)
+ self.tls_path = get_tls_dir(self.name)
+
+ def remove(self):
+ if os.path.isdir(self.meta_path):
+ rmtree(self.meta_path)
+ if os.path.isdir(self.tls_path):
+ rmtree(self.tls_path)
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: '{self.name}'>"
+
+ def __str__(self):
+ return json.dumps(self.__call__(), indent=2)
+
+ def __call__(self):
+ result = self.Metadata
+ result.update(self.TLSMaterial)
+ result.update(self.Storage)
+ return result
+
+ def is_docker_host(self):
+ return self.context_type is None
+
+ @property
+ def Name(self):
+ return self.name
+
+ @property
+ def Host(self):
+ if not self.orchestrator or self.orchestrator == "swarm":
+ endpoint = self.endpoints.get("docker", None)
+ if endpoint:
+ return endpoint.get("Host", None)
+ return None
+
+ return self.endpoints[self.orchestrator].get("Host", None)
+
+ @property
+ def Orchestrator(self):
+ return self.orchestrator
+
+ @property
+ def Metadata(self):
+ meta = {}
+ if self.orchestrator:
+ meta = {"StackOrchestrator": self.orchestrator}
+ return {
+ "Name": self.name,
+ "Metadata": meta,
+ "Endpoints": self.endpoints
+ }
+
+ @property
+ def TLSConfig(self):
+ key = self.orchestrator
+ if not key or key == "swarm":
+ key = "docker"
+ if key in self.tls_cfg.keys():
+ return self.tls_cfg[key]
+ return None
+
+ @property
+ def TLSMaterial(self):
+ certs = {}
+ for endpoint, tls in self.tls_cfg.items():
+ cert, key = tls.cert
+ certs[endpoint] = list(
+ map(os.path.basename, [tls.ca_cert, cert, key]))
+ return {
+ "TLSMaterial": certs
+ }
+
+ @property
+ def Storage(self):
+ return {
+ "Storage": {
+ "MetadataPath": self.meta_path,
+ "TLSPath": self.tls_path
+ }}
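
A save/load round trip for the new Context class; the TCP endpoint is a placeholder, and save() writes under the Docker config directory (typically ~/.docker/contexts):

    from docker.context import Context

    ctx = Context('staging', host='tcp://10.0.0.5:2375')
    ctx.save()                                # persists meta.json and any TLS material
    loaded = Context.load_context('staging')
    print(loaded.Host)                        # tcp://10.0.0.5:2375
    loaded.remove()
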
diff --git a/docker/credentials/__init__.py b/docker/credentials/__init__.py
new file mode 100644
index 0000000..31ad28e
--- /dev/null
+++ b/docker/credentials/__init__.py
@@ -0,0 +1,4 @@
+# flake8: noqa
+from .store import Store
+from .errors import StoreError, CredentialsNotFound
+from .constants import *
diff --git a/docker/credentials/constants.py b/docker/credentials/constants.py
new file mode 100644
index 0000000..6a82d8d
--- /dev/null
+++ b/docker/credentials/constants.py
@@ -0,0 +1,4 @@
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'
diff --git a/docker/credentials/errors.py b/docker/credentials/errors.py
new file mode 100644
index 0000000..42a1bc1
--- /dev/null
+++ b/docker/credentials/errors.py
@@ -0,0 +1,25 @@
+class StoreError(RuntimeError):
+ pass
+
+
+class CredentialsNotFound(StoreError):
+ pass
+
+
+class InitializationError(StoreError):
+ pass
+
+
+def process_store_error(cpe, program):
+ message = cpe.output.decode('utf-8')
+ if 'credentials not found in native keychain' in message:
+ return CredentialsNotFound(
+ 'No matching credentials in {}'.format(
+ program
+ )
+ )
+ return StoreError(
+ 'Credentials store {} exited with "{}".'.format(
+ program, cpe.output.decode('utf-8').strip()
+ )
+ )
diff --git a/docker/credentials/store.py b/docker/credentials/store.py
new file mode 100644
index 0000000..e55976f
--- /dev/null
+++ b/docker/credentials/store.py
@@ -0,0 +1,94 @@
+import errno
+import json
+import subprocess
+
+from . import constants
+from . import errors
+from .utils import create_environment_dict
+from .utils import find_executable
+
+
+class Store:
+ def __init__(self, program, environment=None):
+ """ Create a store object that acts as an interface to
+ perform the basic operations for storing, retrieving
+ and erasing credentials using `program`.
+ """
+ self.program = constants.PROGRAM_PREFIX + program
+ self.exe = find_executable(self.program)
+ self.environment = environment
+ if self.exe is None:
+ raise errors.InitializationError(
+ '{} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+
+ def get(self, server):
+ """ Retrieve credentials for `server`. If no credentials are found,
+ a `StoreError` will be raised.
+ """
+ if not isinstance(server, bytes):
+ server = server.encode('utf-8')
+ data = self._execute('get', server)
+ result = json.loads(data.decode('utf-8'))
+
+        # docker-credential-pass will return an object for nonexistent servers
+ # whereas other helpers will exit with returncode != 0. For
+ # consistency, if no significant data is returned,
+ # raise CredentialsNotFound
+ if result['Username'] == '' and result['Secret'] == '':
+ raise errors.CredentialsNotFound(
+ f'No matching credentials in {self.program}'
+ )
+
+ return result
+
+ def store(self, server, username, secret):
+ """ Store credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ data_input = json.dumps({
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret
+ }).encode('utf-8')
+ return self._execute('store', data_input)
+
+ def erase(self, server):
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ if not isinstance(server, bytes):
+ server = server.encode('utf-8')
+ self._execute('erase', server)
+
+ def list(self):
+ """ List stored credentials. Requires v0.4.0+ of the helper.
+ """
+ data = self._execute('list', None)
+ return json.loads(data.decode('utf-8'))
+
+ def _execute(self, subcmd, data_input):
+ output = None
+ env = create_environment_dict(self.environment)
+ try:
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
+ except subprocess.CalledProcessError as e:
+ raise errors.process_store_error(e, self.program)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise errors.StoreError(
+ '{} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+ else:
+ raise errors.StoreError(
+ 'Unexpected OS error "{}", errno={}'.format(
+ e.strerror, e.errno
+ )
+ )
+ return output
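
Store shells out to a docker-credential-* helper, so the sketch below assumes docker-credential-secretservice is installed and on PATH (InitializationError is raised otherwise):

    from docker.credentials import CredentialsNotFound, Store

    store = Store('secretservice')  # runs docker-credential-secretservice
    try:
        creds = store.get('https://index.docker.io/v1/')
        print(creds['Username'])
    except CredentialsNotFound:
        print('no stored credentials for this registry')
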
diff --git a/docker/credentials/utils.py b/docker/credentials/utils.py
new file mode 100644
index 0000000..3f720ef
--- /dev/null
+++ b/docker/credentials/utils.py
@@ -0,0 +1,38 @@
+import distutils.spawn
+import os
+import sys
+
+
+def find_executable(executable, path=None):
+ """
+ As distutils.spawn.find_executable, but on Windows, look up
+ every extension declared in PATHEXT instead of just `.exe`
+ """
+ if sys.platform != 'win32':
+ return distutils.spawn.find_executable(executable, path)
+
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ for ext in extensions:
+ f = os.path.join(p, base + ext)
+ if os.path.isfile(f):
+ return f
+ return None
+ else:
+ return executable
+
+
+def create_environment_dict(overrides):
+ """
+ Create and return a copy of os.environ with the specified overrides
+ """
+ result = os.environ.copy()
+ result.update(overrides or {})
+ return result
diff --git a/docker/errors.py b/docker/errors.py
index 0253695..ba95256 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -38,23 +38,25 @@ class APIError(requests.exceptions.HTTPError, DockerException):
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
- super(APIError, self).__init__(message)
+ super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
- message = super(APIError, self).__str__()
+ message = super().__str__()
if self.is_client_error():
- message = '{0} Client Error: {1}'.format(
- self.response.status_code, self.response.reason)
+ message = '{} Client Error for {}: {}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
elif self.is_server_error():
- message = '{0} Server Error: {1}'.format(
- self.response.status_code, self.response.reason)
+ message = '{} Server Error for {}: {}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
if self.explanation:
- message = '{0} ("{1}")'.format(message, self.explanation)
+ message = f'{message} ("{self.explanation}")'
return message
@@ -63,6 +65,9 @@ class APIError(requests.exceptions.HTTPError, DockerException):
if self.response is not None:
return self.response.status_code
+ def is_error(self):
+ return self.is_client_error() or self.is_server_error()
+
def is_client_error(self):
if self.status_code is None:
return False
@@ -128,11 +133,11 @@ class ContainerError(DockerException):
self.image = image
self.stderr = stderr
- err = ": {}".format(stderr) if stderr is not None else ""
+ err = f": {stderr}" if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
- super(ContainerError, self).__init__(msg)
+ super().__init__(msg)
class StreamParseError(RuntimeError):
@@ -142,7 +147,7 @@ class StreamParseError(RuntimeError):
class BuildError(DockerException):
def __init__(self, reason, build_log):
- super(BuildError, self).__init__(reason)
+ super().__init__(reason)
self.msg = reason
self.build_log = build_log
@@ -152,11 +157,43 @@ class ImageLoadError(DockerException):
def create_unexpected_kwargs_error(name, kwargs):
- quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
- text = ["{}() ".format(name)]
+ quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
+ text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
+
+
+class MissingContextParameter(DockerException):
+ def __init__(self, param):
+ self.param = param
+
+ def __str__(self):
+ return (f"missing parameter: {self.param}")
+
+
+class ContextAlreadyExists(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return (f"context {self.name} already exists")
+
+
+class ContextException(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return (self.msg)
+
+
+class ContextNotFound(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return (f"context '{self.name}' not found")
diff --git a/docker/models/configs.py b/docker/models/configs.py
index 7f23f65..3588c8b 100644
--- a/docker/models/configs.py
+++ b/docker/models/configs.py
@@ -7,7 +7,7 @@ class Config(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
diff --git a/docker/models/containers.py b/docker/models/containers.py
index b33a718..957deed 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -15,7 +15,12 @@ from .resource import Collection, Model
class Container(Model):
-
+ """ Local representation of a container object. Detailed configuration may
+ be accessed through the :py:attr:`attrs` attribute. Note that local
+ attributes are cached; users may call :py:meth:`reload` to
+ query the Docker daemon for the current properties, causing
+ :py:attr:`attrs` to be refreshed.
+ """
@property
def name(self):
"""
@@ -57,6 +62,13 @@ class Container(Model):
return self.attrs['State']['Status']
return self.attrs['State']
+ @property
+ def ports(self):
+ """
+ The ports that the container exposes as a dictionary.
+ """
+ return self.attrs.get('NetworkSettings', {}).get('Ports', {})
+
def attach(self, **kwargs):
"""
Attach to this container.
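
The new ports property reads from the cached attrs, so a reload() is needed once the daemon has bound host ports. A sketch with a placeholder image and port mapping:

    import docker

    client = docker.from_env()
    c = client.containers.run('nginx:alpine', detach=True,
                              ports={'80/tcp': 8080})
    c.reload()      # refresh attrs so NetworkSettings.Ports is populated
    print(c.ports)  # {'80/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '8080'}]}
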
@@ -139,7 +151,7 @@ class Container(Model):
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
- socket=False, environment=None, workdir=None):
+ socket=False, environment=None, workdir=None, demux=False):
"""
Run a command inside this container. Similar to
``docker exec``.
@@ -161,16 +173,18 @@ class Container(Model):
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
+ demux (bool): Return stdout and stderr separately
Returns:
(ExecResult): A tuple of (exit_code, output)
exit_code: (int):
Exit code for the executed command or ``None`` if
- either ``stream```or ``socket`` is ``True``.
- output: (generator or str):
+ either ``stream`` or ``socket`` is ``True``.
+ output: (generator, bytes, or tuple):
If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection.
- A string containing response data otherwise.
+                If ``demux=True``, a tuple of two bytestrings: stdout and
+                stderr.
+ A bytestring containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
@@ -179,10 +193,11 @@ class Container(Model):
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment,
- workdir=workdir
+ workdir=workdir,
)
exec_output = self.client.api.exec_start(
- resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
+ demux=demux
)
if socket or stream:
return ExecResult(None, exec_output)
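
With demux=True the exec output arrives as a (stdout, stderr) pair instead of one interleaved bytestring. A sketch against a throwaway container (image and command are placeholders):

    import docker

    client = docker.from_env()
    c = client.containers.run('alpine', 'sleep 30', detach=True)
    code, (out, err) = c.exec_run(
        'sh -c "echo to-stdout; echo to-stderr 1>&2"', demux=True)
    print(code, out, err)  # 0 b'to-stdout\n' b'to-stderr\n'
    c.remove(force=True)
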
@@ -210,7 +225,8 @@ class Container(Model):
"""
return self.client.api.export(self.id, chunk_size)
- def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE,
+ encode_stream=False):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
@@ -220,6 +236,8 @@ class Container(Model):
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
+ encode_stream (bool): Determines if data should be encoded
+ (gzip-compressed) during transmission. Default: False
Returns:
(tuple): First element is a raw tar data stream. Second element is
@@ -228,8 +246,20 @@ class Container(Model):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+
+ Example:
+
+ >>> f = open('./sh_bin.tar', 'wb')
+ >>> bits, stat = container.get_archive('/bin/sh')
+ >>> print(stat)
+ {'name': 'sh', 'size': 1075464, 'mode': 493,
+ 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
+ >>> for chunk in bits:
+ ... f.write(chunk)
+ >>> f.close()
"""
- return self.client.api.get_archive(self.id, path, chunk_size)
+ return self.client.api.get_archive(self.id, path,
+ chunk_size, encode_stream)
def kill(self, signal=None):
"""
@@ -253,16 +283,16 @@ class Container(Model):
generator you can iterate over to retrieve log output as it happens.
Args:
- stdout (bool): Get ``STDOUT``
- stderr (bool): Get ``STDERR``
- stream (bool): Stream the response
- timestamps (bool): Show timestamps
+ stdout (bool): Get ``STDOUT``. Default ``True``
+ stderr (bool): Get ``STDERR``. Default ``True``
+ stream (bool): Stream the response. Default ``False``
+ timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
- follow (bool): Follow log output
+ follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
@@ -380,7 +410,8 @@ class Container(Model):
Args:
decode (bool): If set to true, stream will be decoded into dicts
- on the fly. False by default.
+ on the fly. Only applicable if ``stream`` is True.
+ False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
@@ -521,12 +552,15 @@ class ContainerCollection(Collection):
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
+ cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
+ cpu_rt_period (int): Limit CPU real-time period in microseconds.
+ cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
@@ -549,6 +583,9 @@ class ContainerCollection(Collection):
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
+ device_requests (:py:class:`list`): Expose host resources such as
+ GPUs to the container, as a list of
+ :py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
@@ -558,7 +595,7 @@ class ContainerCollection(Collection):
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
- extra_hosts (dict): Addtional hostnames to resolve inside the
+ extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
@@ -570,19 +607,17 @@ class ContainerCollection(Collection):
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
+ kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
- links (dict or list of tuples): Either a dictionary mapping name
- to alias or as a list of ``(name, alias)`` tuples.
- log_config (dict): Logging configuration, as a dictionary with
- keys:
-
- - ``type`` The logging driver name.
- - ``config`` A dictionary of configuration for the logging
- driver.
-
+ links (dict): Mapping of links using the
+ ``{'container': 'alias'}`` format. The alias is optional.
+ Containers declared in this dict will be linked to the new
+ container using the provided alias. Default: ``None``.
+ log_config (LogConfig): Logging configuration.
+ lxc_conf (dict): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
@@ -590,6 +625,7 @@ class ContainerCollection(Collection):
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
+ mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
@@ -613,6 +649,7 @@ class ContainerCollection(Collection):
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
+ This mode is incompatible with ``ports``.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
@@ -628,8 +665,8 @@ class ContainerCollection(Collection):
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
- ``port/protocol``, where the protocol is either ``tcp`` or
- ``udp``.
+ ``port/protocol``, where the protocol is either ``tcp``,
+ ``udp``, or ``sctp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
@@ -646,6 +683,7 @@ class ContainerCollection(Collection):
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
+ Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
@@ -662,6 +700,7 @@ class ContainerCollection(Collection):
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
+ runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
@@ -691,13 +730,21 @@ class ContainerCollection(Collection):
}
tty (bool): Allocate a pseudo-TTY.
- ulimits (:py:class:`list`): Ulimits to set inside the container, as
- a list of dicts.
+ ulimits (:py:class:`list`): Ulimits to set inside the container,
+ as a list of :py:class:`docker.types.Ulimit` instances.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being created.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
+ uts_mode (str): Sets the UTS namespace mode for the container.
+ Supported values are: ``host``
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
@@ -714,10 +761,17 @@ class ContainerCollection(Collection):
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+ Or a list of strings, each of which specifies a volume mount.
+
+ For example:
+
+ .. code-block:: python
+
+ ['/home/user1/:/mnt/vol2', '/var/www:/mnt/vol1']
+
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
- runtime (str): Runtime to use with this container.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
@@ -863,7 +917,8 @@ class ContainerCollection(Collection):
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
@@ -932,8 +987,8 @@ RUN_CREATE_KWARGS = [
'stdin_open',
'stop_signal',
'tty',
+ 'use_config_proxy',
'user',
- 'volume_driver',
'working_dir',
]
@@ -960,6 +1015,7 @@ RUN_HOST_CONFIG_KWARGS = [
'device_write_bps',
'device_write_iops',
'devices',
+ 'device_requests',
'dns_opt',
'dns_search',
'dns',
@@ -995,7 +1051,9 @@ RUN_HOST_CONFIG_KWARGS = [
'tmpfs',
'ulimits',
'userns_mode',
+ 'uts_mode',
'version',
+ 'volume_driver',
'volumes_from',
'runtime'
]
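To make the new ``device_requests`` parameter concrete, a hedged sketch of requesting GPUs via :py:class:`docker.types.DeviceRequest` (the image tag and the presence of the ``nvidia`` driver are assumptions):

    import docker
    from docker.types import DeviceRequest

    client = docker.from_env()

    # count=-1 requests all available devices; capabilities is a list
    # of OR-ed sub-lists whose entries are AND-ed together.
    container = client.containers.run(
        'nvidia/cuda:11.0-base',  # hypothetical image
        'nvidia-smi',
        device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
        detach=True,
    )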
diff --git a/docker/models/images.py b/docker/models/images.py
index 41632c6..46f8efe 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -1,7 +1,6 @@
import itertools
import re
-
-import six
+import warnings
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
@@ -16,7 +15,7 @@ class Image(Model):
An image on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+ return "<{}: '{}'>".format(self.__class__.__name__, "', '".join(self.tags))
@property
def labels(self):
@@ -59,14 +58,20 @@ class Image(Model):
"""
return self.client.api.history(self.id)
- def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
+ def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
- chunk_size (int): The number of bytes returned by each iteration
- of the generator. If ``None``, data will be streamed as it is
- received. Default: 2 MB
+ chunk_size (int): The generator will return up to that much data
+ per iteration, but may return less. If ``None``, data will be
+ streamed as it is received. Default: 2 MB
+ named (str or bool): If ``False`` (default), the tarball will not
+ retain repository and tag information for this image. If set
+ to ``True``, the first tag in the :py:attr:`~tags` list will
+ be used to identify the image. Alternatively, any element of
+ the :py:attr:`~tags` list can be used as an argument to use
+ that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
@@ -77,13 +82,23 @@ class Image(Model):
Example:
- >>> image = cli.get_image("busybox:latest")
- >>> f = open('/tmp/busybox-latest.tar', 'w')
- >>> for chunk in image:
+ >>> image = cli.images.get("busybox:latest")
+ >>> f = open('/tmp/busybox-latest.tar', 'wb')
+ >>> for chunk in image.save():
>>> f.write(chunk)
>>> f.close()
"""
- return self.client.api.get_image(self.id, chunk_size)
+ img = self.id
+ if named:
+ img = self.tags[0] if self.tags else img
+ if isinstance(named, str):
+ if named not in self.tags:
+ raise InvalidArgument(
+ f"{named} is not a valid tag for this image"
+ )
+ img = named
+
+ return self.client.api.get_image(img, chunk_size)
def tag(self, repository, tag=None, **kwargs):
"""
@@ -110,7 +125,7 @@ class RegistryData(Model):
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
- super(RegistryData, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.image_name = image_name
@property
@@ -163,7 +178,7 @@ class RegistryData(Model):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
- '"{0}" is not a valid platform descriptor'.format(platform)
+ f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
@@ -241,6 +256,10 @@ class ImageCollection(Collection):
platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build.
Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
Returns:
(tuple): The first item is the :py:class:`Image` object for the
@@ -256,7 +275,7 @@ class ImageCollection(Collection):
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
- if isinstance(resp, six.string_types):
+ if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
@@ -294,22 +313,26 @@ class ImageCollection(Collection):
"""
return self.prepare_model(self.client.api.inspect_image(name))
- def get_registry_data(self, name):
+ def get_registry_data(self, name, auth_config=None):
"""
Gets the registry data for an image.
Args:
name (str): The name of the image.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
Returns:
(:py:class:`RegistryData`): The data object.
+
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return RegistryData(
image_name=name,
- attrs=self.client.api.inspect_distribution(name),
+ attrs=self.client.api.inspect_distribution(name, auth_config),
client=self.client,
collection=self,
)
@@ -325,7 +348,8 @@ class ImageCollection(Collection):
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- - ``label`` (str): format either ``key`` or ``key=value``
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
Returns:
(list of :py:class:`Image`): The images.
@@ -369,12 +393,13 @@ class ImageCollection(Collection):
return [self.get(i) for i in images]
- def pull(self, repository, tag=None, **kwargs):
+ def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
- If no tag is specified, all tags from that repository will be
- pulled.
+ If ``tag`` is ``None`` or empty, it is set to ``latest``.
+ If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
+ tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
@@ -383,15 +408,15 @@ class ImageCollection(Collection):
Args:
repository (str): The repository to pull
tag (str): The tag to pull
- auth_config (dict): Override the credentials that
- :py:meth:`~docker.client.DockerClient.login` has set for
- this request. ``auth_config`` should contain the ``username``
- and ``password`` keys to be valid.
+ auth_config (dict): Override the credentials that are found in the
+ config for this request. ``auth_config`` should contain the
+ ``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
+ all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
- If no ``tag`` was specified, the method will return a list
+ If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
@@ -401,16 +426,30 @@ class ImageCollection(Collection):
Example:
>>> # Pull the image tagged `latest` in the busybox repo
- >>> image = client.images.pull('busybox:latest')
+ >>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
- >>> images = client.images.pull('busybox')
+ >>> images = client.images.pull('busybox', all_tags=True)
"""
- if not tag:
- repository, tag = parse_repository_tag(repository)
+ repository, image_tag = parse_repository_tag(repository)
+ tag = tag or image_tag or 'latest'
- self.client.api.pull(repository, tag=tag, **kwargs)
- if tag:
+ if 'stream' in kwargs:
+ warnings.warn(
+ '`stream` is not a valid parameter for this method'
+ ' and will be overridden'
+ )
+ del kwargs['stream']
+
+ pull_log = self.client.api.pull(
+ repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
+ )
+ for _ in pull_log:
+ # We don't do anything with the logs, but we need
+ # to keep the connection alive and wait for the image
+ # to be pulled.
+ pass
+ if not all_tags:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))
diff --git a/docker/models/networks.py b/docker/models/networks.py
index be3291a..093deb7 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -46,6 +46,8 @@ class Network(Model):
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
+ driver_opt (dict): A dictionary of options to provide to the
+ network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`
@@ -190,7 +192,8 @@ class NetworkCollection(Collection):
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- - ``label=[<key>]`` or ``label=[<key>=<value>]``.
+ - `label` (str|list): format either ``"key"``, ``"key=value"``
+ or a list of such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
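A small sketch of the new ``driver_opt`` argument to :py:meth:`Network.connect`; the network name, container name, and option key are hypothetical:

    import docker

    client = docker.from_env()
    network = client.networks.get('mynet')  # hypothetical network

    # Forward driver-specific endpoint options for this connection.
    network.connect(
        'mycontainer',  # hypothetical container
        driver_opt={'com.example.bandwidth': '10Mbps'},  # hypothetical key
    )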
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
index 0688018..37ecefb 100644
--- a/docker/models/plugins.py
+++ b/docker/models/plugins.py
@@ -7,7 +7,7 @@ class Plugin(Model):
A plugin on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -117,9 +117,8 @@ class Plugin(Model):
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
- for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
- yield d
- self._reload()
+ yield from self.client.api.upgrade_plugin(self.name, remote, privileges)
+ self.reload()
class PluginCollection(Collection):
diff --git a/docker/models/resource.py b/docker/models/resource.py
index ed3900a..dec2349 100644
--- a/docker/models/resource.py
+++ b/docker/models/resource.py
@@ -1,5 +1,4 @@
-
-class Model(object):
+class Model:
"""
A base class for representing a single object on the server.
"""
@@ -18,13 +17,13 @@ class Model(object):
self.attrs = {}
def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+ return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
- return hash("%s:%s" % (self.__class__.__name__, self.id))
+ return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
@@ -49,7 +48,7 @@ class Model(object):
self.attrs = new_model.attrs
-class Collection(object):
+class Collection:
"""
A base class for representing all objects of a particular type on the
server.
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
index ca11ede..da01d44 100644
--- a/docker/models/secrets.py
+++ b/docker/models/secrets.py
@@ -7,7 +7,7 @@ class Secret(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -30,6 +30,7 @@ class SecretCollection(Collection):
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
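A sketch of why the ``Name`` backfill matters: the create endpoint returns only an ID, so ``secret.name`` would otherwise require an extra inspect round-trip (the secret payload is an assumption):

    import docker

    client = docker.from_env()
    # Requires a swarm-enabled Engine.
    secret = client.secrets.create(name='db_password', data=b'hunter2')

    # Name is now populated locally from the create() kwargs.
    print(secret.name)  # 'db_password'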
diff --git a/docker/models/services.py b/docker/models/services.py
index 458d2c8..200dd33 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -1,6 +1,6 @@
import copy
from docker.errors import create_unexpected_kwargs_error, InvalidArgument
-from docker.types import TaskTemplate, ContainerSpec, ServiceMode
+from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode
from .resource import Model, Collection
@@ -42,7 +42,7 @@ class Service(Model):
``label``, and ``desired-state``.
Returns:
- (:py:class:`list`): List of task dictionaries.
+ :py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
@@ -84,26 +84,27 @@ class Service(Model):
def logs(self, **kwargs):
"""
- Get log stream for the service.
- Note: This method works only for services with the ``json-file``
- or ``journald`` logging drivers.
-
- Args:
- details (bool): Show extra details provided to logs.
- Default: ``False``
- follow (bool): Keep connection open to read logs as they are
- sent by the Engine. Default: ``False``
- stdout (bool): Return logs from ``stdout``. Default: ``False``
- stderr (bool): Return logs from ``stderr``. Default: ``False``
- since (int): UNIX timestamp for the logs staring point.
- Default: 0
- timestamps (bool): Add timestamps to every log line.
- tail (string or int): Number of log lines to be returned,
- counting from the current end of the logs. Specify an
- integer or ``'all'`` to output all log lines.
- Default: ``all``
-
- Returns (generator): Logs for the service.
+ Get log stream for the service.
+ Note: This method works only for services with the ``json-file``
+ or ``journald`` logging drivers.
+
+ Args:
+ details (bool): Show extra details provided to logs.
+ Default: ``False``
+ follow (bool): Keep connection open to read logs as they are
+ sent by the Engine. Default: ``False``
+ stdout (bool): Return logs from ``stdout``. Default: ``False``
+ stderr (bool): Return logs from ``stderr``. Default: ``False``
+ since (int): UNIX timestamp for the logs starting point.
+ Default: 0
+ timestamps (bool): Add timestamps to every log line.
+ tail (string or int): Number of log lines to be returned,
+ counting from the current end of the logs. Specify an
+ integer or ``'all'`` to output all log lines.
+ Default: ``all``
+
+ Returns:
+ generator: Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
@@ -118,7 +119,7 @@ class Service(Model):
replicas (int): The number of containers that should be running.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
if 'Global' in self.attrs['Spec']['Mode'].keys():
@@ -134,7 +135,7 @@ class Service(Model):
Force update the service even if no changes require it.
Returns:
- ``True``if successful.
+ bool: ``True`` if successful.
"""
return self.update(force_update=True, fetch_current_spec=True)
@@ -152,13 +153,22 @@ class ServiceCollection(Collection):
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
- constraints (list of str): Placement constraints.
+ constraints (list of str): :py:class:`~docker.types.Placement`
+ constraints.
+ preferences (list of tuple): :py:class:`~docker.types.Placement`
+ preferences.
+ maxreplicas (int): Maximum number of replicas per node, applied
+ via :py:class:`~docker.types.Placement`.
+ platforms (list of tuple): A list of platform constraints
+ expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
+ init (boolean): Run an init inside the container that forwards
+ signals and reaps processes
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
labels (dict): Labels to apply to the service.
@@ -170,16 +180,19 @@ class ServiceCollection(Collection):
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
- networks (list of str): List of network names or IDs to attach
- the service to. Default: ``None``.
+ networks (:py:class:`list`): List of network names or IDs or
+ :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
+ service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
- secrets (list of :py:class:`docker.types.SecretReference`): List
+ secrets (list of :py:class:`~docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
+ rollback_config (RollbackConfig): Specification for the rollback
+ strategy of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
@@ -195,13 +208,18 @@ class ServiceCollection(Collection):
the container's `hosts` file.
dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file.
- configs (:py:class:`list`): List of :py:class:`ConfigReference`
- that will be exposed to the service.
+ configs (:py:class:`list`): List of
+ :py:class:`~docker.types.ConfigReference` that will be exposed
+ to the service.
privileges (Privileges): Security options for the service's
containers.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to
+ the default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop
+ from the default set for the container.
Returns:
- (:py:class:`Service`) The created service.
+ :py:class:`Service`: The created service.
Raises:
:py:class:`docker.errors.APIError`
@@ -223,7 +241,7 @@ class ServiceCollection(Collection):
into the output.
Returns:
- (:py:class:`Service`): The service.
+ :py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
@@ -248,7 +266,7 @@ class ServiceCollection(Collection):
Default: ``None``.
Returns:
- (list of :py:class:`Service`): The services.
+ list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
@@ -263,6 +281,8 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
+ 'cap_add',
+ 'cap_drop',
'command',
'configs',
'dns_config',
@@ -272,6 +292,7 @@ CONTAINER_SPEC_KWARGS = [
'hostname',
'hosts',
'image',
+ 'init',
'isolation',
'labels',
'mounts',
@@ -302,6 +323,13 @@ CREATE_SERVICE_KWARGS = [
'endpoint_spec',
]
+PLACEMENT_KWARGS = [
+ 'constraints',
+ 'preferences',
+ 'platforms',
+ 'maxreplicas',
+]
+
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
@@ -321,10 +349,12 @@ def _get_create_service_kwargs(func_name, kwargs):
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
- if 'constraints' in kwargs:
- task_template_kwargs['placement'] = {
- 'Constraints': kwargs.pop('constraints')
- }
+ placement = {}
+ for key in copy.copy(kwargs):
+ if key in PLACEMENT_KWARGS:
+ placement[key] = kwargs.pop(key)
+ placement = Placement(**placement)
+ task_template_kwargs['placement'] = placement
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
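As a hedged sketch of the new placement handling in :py:meth:`ServiceCollection.create` (service name, constraint, and label values are assumptions; requires a swarm-enabled Engine):

    import docker

    client = docker.from_env()

    # constraints, preferences, platforms, and maxreplicas are now
    # collected into a single docker.types.Placement on the task
    # template, instead of only constraints being supported.
    service = client.services.create(
        image='nginx:alpine',
        name='web',  # hypothetical service name
        constraints=['node.role == worker'],
        preferences=[('spread', 'node.labels.datacenter')],
        maxreplicas=2,
    )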
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index 7396e73..b0b1a2e 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -11,7 +11,7 @@ class Swarm(Model):
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
- super(Swarm, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()
@@ -34,7 +34,8 @@ class Swarm(Model):
get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, **kwargs):
+ force_new_cluster=False, default_addr_pool=None,
+ subnet_size=None, data_path_addr=None, **kwargs):
"""
Initialize a new swarm on this Engine.
@@ -56,6 +57,14 @@ class Swarm(Model):
is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
+ default_addr_pool (list of str): Default Address Pool specifies
+ default subnet pools for global scope networks. Each pool
+ should be specified as a CIDR block, like '10.0.0.0/8'.
+ Default: None
+ subnet_size (int): SubnetSize specifies the subnet size of the
+ networks created from the default subnet pool. Default: None
+ data_path_addr (string): Address or interface to use for data path
+ traffic. For example, 192.168.1.1, or an interface, like eth0.
task_history_retention_limit (int): Maximum number of tasks
history stored.
snapshot_interval (int): Number of logs entries between snapshot.
@@ -89,7 +98,7 @@ class Swarm(Model):
created in the orchestrator.
Returns:
- ``True`` if the request went through.
+ (str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
@@ -99,7 +108,8 @@ class Swarm(Model):
>>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
- force_new_cluster=False, snapshot_interval=5000,
+ force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
+ subnet_size=24, snapshot_interval=5000,
log_entries_for_slow_followers=1200
)
@@ -107,11 +117,15 @@ class Swarm(Model):
init_kwargs = {
'advertise_addr': advertise_addr,
'listen_addr': listen_addr,
- 'force_new_cluster': force_new_cluster
+ 'force_new_cluster': force_new_cluster,
+ 'default_addr_pool': default_addr_pool,
+ 'subnet_size': subnet_size,
+ 'data_path_addr': data_path_addr,
}
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
- self.client.api.init_swarm(**init_kwargs)
+ node_id = self.client.api.init_swarm(**init_kwargs)
self.reload()
+ return node_id
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
@@ -137,7 +151,7 @@ class Swarm(Model):
unlock.__doc__ = APIClient.unlock_swarm.__doc__
def update(self, rotate_worker_token=False, rotate_manager_token=False,
- **kwargs):
+ rotate_manager_unlock_key=False, **kwargs):
"""
Update the swarm's configuration.
@@ -150,7 +164,8 @@ class Swarm(Model):
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
-
+ rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+ Default: ``False``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
@@ -164,5 +179,6 @@ class Swarm(Model):
version=self.version,
swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token,
- rotate_manager_token=rotate_manager_token
+ rotate_manager_token=rotate_manager_token,
+ rotate_manager_unlock_key=rotate_manager_unlock_key
)
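A minimal sketch of the new unlock-key rotation flag; it assumes the client is pointed at a swarm manager:

    import docker

    client = docker.from_env()

    # Rotate the manager unlock key alongside the join tokens.
    client.swarm.update(
        rotate_worker_token=True,
        rotate_manager_token=True,
        rotate_manager_unlock_key=True,
    )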
diff --git a/docker/tls.py b/docker/tls.py
index 4900e9f..067d556 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -2,10 +2,10 @@ import os
import ssl
from . import errors
-from .transport import SSLAdapter
+from .transport import SSLHTTPAdapter
-class TLSConfig(object):
+class TLSConfig:
"""
TLS configuration.
@@ -32,7 +32,7 @@ class TLSConfig(object):
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
- # leaving tls_verify=False
+ # leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -62,7 +62,7 @@ class TLSConfig(object):
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
- # "tls" and "tls_verify" must have both or neither cert/key files In
+ # "client_cert" must have both or neither cert/key files. In
# either case, alert the user when both are expected but either is
# missing.
@@ -71,7 +71,7 @@ class TLSConfig(object):
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
- 'client_config must be a tuple of'
+ 'client_cert must be a tuple of'
' (client certificate, key file)'
)
@@ -79,7 +79,7 @@ class TLSConfig(object):
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
- ' through the client_config param'
+ ' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
@@ -88,7 +88,7 @@ class TLSConfig(object):
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
- 'Invalid CA certificate provided for `tls_ca_cert`.'
+ 'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
@@ -105,7 +105,7 @@ class TLSConfig(object):
if self.cert:
client.cert = self.cert
- client.mount('https://', SSLAdapter(
+ client.mount('https://', SSLHTTPAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
index abbee18..e37fc3b 100644
--- a/docker/transport/__init__.py
+++ b/docker/transport/__init__.py
@@ -1,8 +1,13 @@
# flake8: noqa
-from .unixconn import UnixAdapter
-from .ssladapter import SSLAdapter
+from .unixconn import UnixHTTPAdapter
+from .ssladapter import SSLHTTPAdapter
try:
- from .npipeconn import NpipeAdapter
+ from .npipeconn import NpipeHTTPAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
+
+try:
+ from .sshconn import SSHHTTPAdapter
+except ImportError:
+ pass
diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py
new file mode 100644
index 0000000..dfbb193
--- /dev/null
+++ b/docker/transport/basehttpadapter.py
@@ -0,0 +1,8 @@
+import requests.adapters
+
+
+class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
+ def close(self):
+ super().close()
+ if hasattr(self, 'pools'):
+ self.pools.clear()
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index ab9b904..df67f21 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -1,13 +1,11 @@
-import six
+import queue
import requests.adapters
+from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
+import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@@ -17,9 +15,9 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class NpipeHTTPConnection(httplib.HTTPConnection, object):
+class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
- super(NpipeHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
@@ -34,7 +32,7 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
- super(NpipeHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
@@ -56,7 +54,7 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
- except six.moves.queue.Empty:
+ except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
@@ -68,20 +66,23 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
return conn or self._new_conn()
-class NpipeAdapter(requests.adapters.HTTPAdapter):
+class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
- 'timeout']
+ 'timeout',
+ 'max_pool_size']
def __init__(self, base_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(NpipeAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -90,7 +91,8 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
return pool
pool = NpipeHTTPConnectionPool(
- self.npipe_path, self.timeout
+ self.npipe_path, self.timeout,
+ maxsize=self.max_pool_size
)
self.pools[url] = pool
@@ -103,6 +105,3 @@ class NpipeAdapter(requests.adapters.HTTPAdapter):
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
-
- def close(self):
- self.pools.clear()
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index c04b39d..766372a 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -1,7 +1,7 @@
import functools
+import time
import io
-import six
import win32file
import win32pipe
@@ -9,7 +9,7 @@ cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
-RETRY_WAIT_TIMEOUT = 10000
+MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@@ -23,7 +23,7 @@ def check_closed(f):
return wrapped
-class NpipeSocket(object):
+class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
@@ -46,8 +46,7 @@ class NpipeSocket(object):
self._closed = True
@check_closed
- def connect(self, address):
- win32pipe.WaitNamedPipe(address, self._timeout)
+ def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
@@ -65,8 +64,10 @@ class NpipeSocket(object):
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
- win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
- return self.connect(address)
+ retry_count += 1
+ if retry_count < MAXIMUM_RETRY_COUNT:
+ time.sleep(1)
+ return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
@@ -87,10 +88,6 @@ class NpipeSocket(object):
def dup(self):
return NpipeSocket(self._handle)
- @check_closed
- def fileno(self):
- return int(self._handle)
-
def getpeername(self):
return self._address
@@ -130,9 +127,6 @@ class NpipeSocket(object):
@check_closed
def recv_into(self, buf, nbytes=0):
- if six.PY2:
- return self._recv_into_py2(buf, nbytes)
-
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
@@ -197,7 +191,7 @@ class NpipeFileIOBase(io.RawIOBase):
self.sock = npipe_socket
def close(self):
- super(NpipeFileIOBase, self).close()
+ super().close()
self.sock = None
def fileno(self):
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py
new file mode 100644
index 0000000..8e6beb2
--- /dev/null
+++ b/docker/transport/sshconn.py
@@ -0,0 +1,255 @@
+import paramiko
+import queue
+import urllib.parse
+import requests.adapters
+import logging
+import os
+import signal
+import socket
+import subprocess
+
+from docker.transport.basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+import http.client as httplib
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class SSHSocket(socket.socket):
+ def __init__(self, host):
+ super().__init__(
+ socket.AF_INET, socket.SOCK_STREAM)
+ self.host = host
+ self.port = None
+ self.user = None
+ if ':' in self.host:
+ self.host, self.port = self.host.split(':')
+ if '@' in self.host:
+ self.user, self.host = self.host.split('@')
+
+ self.proc = None
+
+ def connect(self, **kwargs):
+ args = ['ssh']
+ if self.user:
+ args = args + ['-l', self.user]
+
+ if self.port:
+ args = args + ['-p', self.port]
+
+ args = args + ['--', self.host, 'docker system dial-stdio']
+
+ preexec_func = None
+ if not constants.IS_WINDOWS_PLATFORM:
+ def f():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ preexec_func = f
+
+ env = dict(os.environ)
+
+ # drop LD_LIBRARY_PATH and SSL_CERT_FILE
+ env.pop('LD_LIBRARY_PATH', None)
+ env.pop('SSL_CERT_FILE', None)
+
+ self.proc = subprocess.Popen(
+ ' '.join(args),
+ env=env,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=None if constants.IS_WINDOWS_PLATFORM else preexec_func)
+
+ def _write(self, data):
+ if not self.proc or self.proc.stdin.closed:
+ raise Exception('SSH subprocess not initiated. '
+ 'connect() must be called first.')
+ written = self.proc.stdin.write(data)
+ self.proc.stdin.flush()
+ return written
+
+ def sendall(self, data):
+ self._write(data)
+
+ def send(self, data):
+ return self._write(data)
+
+ def recv(self, n):
+ if not self.proc:
+ raise Exception('SSH subprocess not initiated. '
+ 'connect() must be called first.')
+ return self.proc.stdout.read(n)
+
+ def makefile(self, mode):
+ if not self.proc:
+ self.connect()
+ self.proc.stdout.channel = self
+
+ return self.proc.stdout
+
+ def close(self):
+ if not self.proc or self.proc.stdin.closed:
+ return
+ self.proc.stdin.write(b'\n\n')
+ self.proc.stdin.flush()
+ self.proc.terminate()
+
+
+class SSHConnection(httplib.HTTPConnection):
+ def __init__(self, ssh_transport=None, timeout=60, host=None):
+ super().__init__(
+ 'localhost', timeout=timeout
+ )
+ self.ssh_transport = ssh_transport
+ self.timeout = timeout
+ self.ssh_host = host
+
+ def connect(self):
+ if self.ssh_transport:
+ sock = self.ssh_transport.open_session()
+ sock.settimeout(self.timeout)
+ sock.exec_command('docker system dial-stdio')
+ else:
+ sock = SSHSocket(self.ssh_host)
+ sock.settimeout(self.timeout)
+ sock.connect()
+
+ self.sock = sock
+
+
+class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ scheme = 'ssh'
+
+ def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
+ super().__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.ssh_transport = None
+ self.timeout = timeout
+ if ssh_client:
+ self.ssh_transport = ssh_client.get_transport()
+ self.ssh_host = host
+
+ def _new_conn(self):
+ return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
+
+ # When re-using connections, urllib3 calls fileno() on our
+ # SSH channel instance, quickly overloading our fd limit. To avoid this,
+ # we override _get_conn
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ )
+ pass # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class SSHHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
+ 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
+ ]
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
+ shell_out=False):
+ self.ssh_client = None
+ if not shell_out:
+ self._create_paramiko_client(base_url)
+ self._connect()
+
+ self.ssh_host = base_url
+ if base_url.startswith('ssh://'):
+ self.ssh_host = base_url[len('ssh://'):]
+
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super().__init__()
+
+ def _create_paramiko_client(self, base_url):
+ logging.getLogger("paramiko").setLevel(logging.WARNING)
+ self.ssh_client = paramiko.SSHClient()
+ base_url = urllib.parse.urlparse(base_url)
+ self.ssh_params = {
+ "hostname": base_url.hostname,
+ "port": base_url.port,
+ "username": base_url.username
+ }
+ ssh_config_file = os.path.expanduser("~/.ssh/config")
+ if os.path.exists(ssh_config_file):
+ conf = paramiko.SSHConfig()
+ with open(ssh_config_file) as f:
+ conf.parse(f)
+ host_config = conf.lookup(base_url.hostname)
+ if 'proxycommand' in host_config:
+ self.ssh_params["sock"] = paramiko.ProxyCommand(
+ host_config['proxycommand']
+ )
+ if 'hostname' in host_config:
+ self.ssh_params['hostname'] = host_config['hostname']
+ if base_url.port is None and 'port' in host_config:
+ self.ssh_params['port'] = host_config['port']
+ if base_url.username is None and 'user' in host_config:
+ self.ssh_params['username'] = host_config['user']
+ if 'identityfile' in host_config:
+ self.ssh_params['key_filename'] = host_config['identityfile']
+
+ self.ssh_client.load_system_host_keys()
+ self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ def _connect(self):
+ if self.ssh_client:
+ self.ssh_client.connect(**self.ssh_params)
+
+ def get_connection(self, url, proxies=None):
+ if not self.ssh_client:
+ return SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ # Connection is closed; try to reconnect
+ if self.ssh_client and not self.ssh_client.get_transport():
+ self._connect()
+
+ pool = SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def close(self):
+ super().close()
+ if self.ssh_client:
+ self.ssh_client.close()
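A usage sketch for the SSH transport; the ``use_ssh_client`` constructor flag (selecting the shell-out ``SSHSocket`` path over paramiko) is an assumption not shown in this diff, and the host is hypothetical:

    import docker

    # paramiko-backed transport (default):
    client = docker.DockerClient(base_url='ssh://user@example.com')

    # Assumed flag: shell out to the local `ssh` binary instead, which
    # honours ~/.ssh/config features paramiko may not implement.
    client = docker.DockerClient(
        base_url='ssh://user@example.com', use_ssh_client=True
    )
    print(client.ping())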
diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py
index 8fafec3..31e3014 100644
--- a/docker/transport/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -7,6 +7,8 @@ import sys
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
+from docker.transport.basehttpadapter import BaseHTTPAdapter
+
try:
import requests.packages.urllib3 as urllib3
except ImportError:
@@ -22,7 +24,7 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5:
urllib3.connection.match_hostname = match_hostname
-class SSLAdapter(HTTPAdapter):
+class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
@@ -34,7 +36,7 @@ class SSLAdapter(HTTPAdapter):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- super(SSLAdapter, self).__init__(**kwargs)
+ super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
@@ -57,7 +59,7 @@ class SSLAdapter(HTTPAdapter):
But we still need to take care of when there is a proxy poolmanager
"""
- conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
+ conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index c59821a..1b00762 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -1,8 +1,8 @@
-import six
import requests.adapters
import socket
-from six.moves import http_client as httplib
+import http.client as httplib
+from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
try:
@@ -14,27 +14,15 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class UnixHTTPResponse(httplib.HTTPResponse, object):
- def __init__(self, sock, *args, **kwargs):
- disable_buffering = kwargs.pop('disable_buffering', False)
- if six.PY2:
- # FIXME: We may need to disable buffering on Py3 as well,
- # but there's no clear way to do it at the moment. See:
- # https://github.com/docker/docker-py/issues/1799
- kwargs['buffering'] = not disable_buffering
- super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
-
-
-class UnixHTTPConnection(httplib.HTTPConnection, object):
+class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
- super(UnixHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
- self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -43,20 +31,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.sock = sock
def putheader(self, header, *values):
- super(UnixHTTPConnection, self).putheader(header, *values)
- if header == 'Connection' and 'Upgrade' in values:
- self.disable_buffering = True
+ super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
- if self.disable_buffering:
- kwargs['disable_buffering'] = True
-
- return UnixHTTPResponse(sock, *args, **kwargs)
+ return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
- super(UnixHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
@@ -69,23 +52,26 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
)
-class UnixAdapter(requests.adapters.HTTPAdapter):
+class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
- 'timeout']
+ 'timeout',
+ 'max_pool_size']
def __init__(self, socket_url, timeout=60,
- pool_connections=constants.DEFAULT_NUM_POOLS):
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
+ self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(UnixAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
@@ -94,7 +80,8 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
return pool
pool = UnixHTTPConnectionPool(
- url, self.socket_path, self.timeout
+ url, self.socket_path, self.timeout,
+ maxsize=self.max_pool_size
)
self.pools[url] = pool
@@ -107,6 +94,3 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
-
- def close(self):
- self.pools.clear()
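A short sketch of the new ``max_pool_size`` knob, assuming the client constructor forwards it to the adapter as the added ``maxsize=self.max_pool_size`` suggests:

    import docker

    # Allow up to 32 concurrent connections per pool instead of the
    # small urllib3 default, useful for heavily threaded callers.
    client = docker.APIClient(
        base_url='unix://var/run/docker.sock',
        max_pool_size=32,  # assumed constructor parameter
    )
    print(client.version()['ApiVersion'])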
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 0b0d847..b425746 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,11 +1,14 @@
# flake8: noqa
-from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .containers import (
+ ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
+)
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
- Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference,
- ServiceMode, TaskTemplate, UpdateConfig
+ Mount, Placement, PlacementPreference, Privileges, Resources,
+ RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate,
+ UpdateConfig, NetworkAttachmentConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/base.py b/docker/types/base.py
index 6891062..8851f1e 100644
--- a/docker/types/base.py
+++ b/docker/types/base.py
@@ -1,7 +1,4 @@
-import six
-
-
class DictType(dict):
def __init__(self, init):
- for k, v in six.iteritems(init):
+ for k, v in init.items():
self[k] = v
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 2521420..f1b60b2 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
@@ -10,7 +8,7 @@ from .base import DictType
from .healthcheck import Healthcheck
-class LogConfigTypesEnum(object):
+class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
@@ -23,6 +21,35 @@ class LogConfigTypesEnum(object):
class LogConfig(DictType):
+ """
+ Configure logging for a container, when provided as an argument to
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+ You may refer to the
+ `official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
+ for more information.
+
+ Args:
+ type (str): Indicate which log driver to use. A set of valid drivers
+ is provided as part of the :py:attr:`LogConfig.types`
+ enum. Other values may be accepted depending on the engine version
+ and available logging plugins.
+ config (dict): A driver-dependent configuration dictionary. Please
+ refer to the driver's documentation for a list of valid config
+ keys.
+
+ Example:
+
+ >>> from docker.types import LogConfig
+ >>> lc = LogConfig(type=LogConfig.types.JSON, config={
+ ... 'max-size': '1g',
+ ... 'labels': 'production_status,geo'
+ ... })
+ >>> hc = client.create_host_config(log_config=lc)
+ >>> container = client.create_container('busybox', 'true',
+ ... host_config=hc)
+ >>> client.inspect_container(container)['HostConfig']['LogConfig']
+ {'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}}
+ """ # noqa: E501
types = LogConfigTypesEnum
def __init__(self, **kwargs):
@@ -32,7 +59,7 @@ class LogConfig(DictType):
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
- super(LogConfig, self).__init__({
+ super().__init__({
'Type': log_driver_type,
'Config': config
})
@@ -50,25 +77,51 @@ class LogConfig(DictType):
return self['Config']
def set_config_value(self, key, value):
+ """ Set a the value for ``key`` to ``value`` inside the ``config``
+ dict.
+ """
self.config[key] = value
def unset_config(self, key):
+ """ Remove the ``key`` property from the ``config`` dict. """
if key in self.config:
del self.config[key]
class Ulimit(DictType):
+ """
+ Create a ulimit declaration to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+ name (str): Which ulimit this will apply to. Valid names can be
+ found in '/etc/security/limits.conf' on a GNU/Linux system.
+ soft (int): The soft limit for this ulimit. Optional.
+ hard (int): The hard limit for this ulimit. Optional.
+
+ Example:
+
+ >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
+ >>> hc = client.create_host_config(ulimits=[nproc_limit])
+ >>> container = client.create_container(
+ 'busybox', 'true', host_config=hc
+ )
+ >>> client.inspect_container(container)['HostConfig']['Ulimits']
+ [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
+
+ """
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
- if not isinstance(name, six.string_types):
+ if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
- super(Ulimit, self).__init__({
+ super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
@@ -99,6 +152,104 @@ class Ulimit(DictType):
self['Hard'] = value
+class DeviceRequest(DictType):
+ """
+ Create a device request to be used with
+ :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
+
+ Args:
+
+ driver (str): Which driver to use for this device. Optional.
+        count (int): Number of devices to request. Optional.
+ Set to -1 to request all available devices.
+ device_ids (list): List of strings for device IDs. Optional.
+ Set either ``count`` or ``device_ids``.
+        capabilities (list): List of lists of strings to request
+            capabilities. Optional. The outer list acts as an OR; each
+            inner list is an AND. The driver will try to satisfy one
+            of the sub-lists.
+ Available capabilities for the ``nvidia`` driver can be found
+ `here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
+ options (dict): Driver-specific options. Optional.
+ """
+
+ def __init__(self, **kwargs):
+ driver = kwargs.get('driver', kwargs.get('Driver'))
+ count = kwargs.get('count', kwargs.get('Count'))
+ device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
+ capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
+ options = kwargs.get('options', kwargs.get('Options'))
+
+ if driver is None:
+ driver = ''
+ elif not isinstance(driver, str):
+ raise ValueError('DeviceRequest.driver must be a string')
+ if count is None:
+ count = 0
+ elif not isinstance(count, int):
+ raise ValueError('DeviceRequest.count must be an integer')
+ if device_ids is None:
+ device_ids = []
+ elif not isinstance(device_ids, list):
+ raise ValueError('DeviceRequest.device_ids must be a list')
+ if capabilities is None:
+ capabilities = []
+ elif not isinstance(capabilities, list):
+ raise ValueError('DeviceRequest.capabilities must be a list')
+ if options is None:
+ options = {}
+ elif not isinstance(options, dict):
+ raise ValueError('DeviceRequest.options must be a dict')
+
+ super().__init__({
+ 'Driver': driver,
+ 'Count': count,
+ 'DeviceIDs': device_ids,
+ 'Capabilities': capabilities,
+ 'Options': options
+ })
+
+ @property
+ def driver(self):
+ return self['Driver']
+
+ @driver.setter
+ def driver(self, value):
+ self['Driver'] = value
+
+ @property
+ def count(self):
+ return self['Count']
+
+ @count.setter
+ def count(self, value):
+ self['Count'] = value
+
+ @property
+ def device_ids(self):
+ return self['DeviceIDs']
+
+ @device_ids.setter
+ def device_ids(self, value):
+ self['DeviceIDs'] = value
+
+ @property
+ def capabilities(self):
+ return self['Capabilities']
+
+ @capabilities.setter
+ def capabilities(self, value):
+ self['Capabilities'] = value
+
+ @property
+ def options(self):
+ return self['Options']
+
+ @options.setter
+ def options(self, value):
+ self['Options'] = value
+
+
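As a rough usage sketch, the new type composes with ``create_host_config`` via the ``device_requests`` parameter added further below in this diff (assuming ``client`` is a low-level APIClient pinned to API version 1.40+, the ``nvidia`` runtime is available, and ``DeviceRequest`` is exported from ``docker.types`` like the other types):

    >>> from docker.types import DeviceRequest
    >>> gpu = DeviceRequest(driver='nvidia', count=-1, capabilities=[['gpu']])
    >>> hc = client.create_host_config(device_requests=[gpu])

Per the docstring above, ``count=-1`` requests every available device.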
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
@@ -115,13 +266,13 @@ class HostConfig(dict):
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
- cpuset_cpus=None, userns_mode=None, pids_limit=None,
- isolation=None, auto_remove=False, storage_opt=None,
- init=None, init_path=None, volume_driver=None,
- cpu_count=None, cpu_percent=None, nano_cpus=None,
- cpuset_mems=None, runtime=None, mounts=None,
+ cpuset_cpus=None, userns_mode=None, uts_mode=None,
+ pids_limit=None, isolation=None, auto_remove=False,
+ storage_opt=None, init=None, init_path=None,
+ volume_driver=None, cpu_count=None, cpu_percent=None,
+ nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
- device_cgroup_rules=None):
+ device_cgroup_rules=None, device_requests=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@@ -144,7 +295,7 @@ class HostConfig(dict):
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
- if isinstance(shm_size, six.string_types):
+ if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
@@ -181,10 +332,11 @@ class HostConfig(dict):
if dns_search:
self['DnsSearch'] = dns_search
- if network_mode:
- self['NetworkMode'] = network_mode
- elif network_mode is None:
- self['NetworkMode'] = 'default'
+ if network_mode == 'host' and port_bindings:
+ raise host_config_incompatible_error(
+ 'network_mode', 'host', 'port_bindings'
+ )
+ self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
@@ -204,7 +356,7 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices)
if group_add:
- self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+ self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
@@ -224,11 +376,11 @@ class HostConfig(dict):
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- self['Sysctls'][k] = six.text_type(v)
+ for k, v in sysctls.items():
+ self['Sysctls'][k] = str(v)
if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
+ if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
@@ -250,7 +402,7 @@ class HostConfig(dict):
if isinstance(lxc_conf, dict):
formatted = []
- for k, v in six.iteritems(lxc_conf):
+ for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
@@ -264,10 +416,10 @@ class HostConfig(dict):
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
- for l in ulimits:
- if not isinstance(l, Ulimit):
- l = Ulimit(**l)
- self['Ulimits'].append(l)
+ for lmt in ulimits:
+ if not isinstance(lmt, Ulimit):
+ lmt = Ulimit(**lmt)
+ self['Ulimits'].append(lmt)
if log_config is not None:
if not isinstance(log_config, LogConfig):
@@ -392,6 +544,11 @@ class HostConfig(dict):
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
+ if uts_mode:
+ if uts_mode != "host":
+ raise host_config_value_error("uts_mode", uts_mode)
+ self['UTSMode'] = uts_mode
+
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
@@ -400,7 +557,7 @@ class HostConfig(dict):
self["PidsLimit"] = pids_limit
if isolation:
- if not isinstance(isolation, six.string_types):
+ if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
@@ -450,7 +607,7 @@ class HostConfig(dict):
self['CpuPercent'] = cpu_percent
if nano_cpus:
- if not isinstance(nano_cpus, six.integer_types):
+ if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
@@ -476,6 +633,19 @@ class HostConfig(dict):
)
self['DeviceCgroupRules'] = device_cgroup_rules
+ if device_requests is not None:
+ if version_lt(version, '1.40'):
+ raise host_config_version_error('device_requests', '1.40')
+ if not isinstance(device_requests, list):
+ raise host_config_type_error(
+ 'device_requests', device_requests, 'list'
+ )
+ self['DeviceRequests'] = []
+ for req in device_requests:
+ if not isinstance(req, DeviceRequest):
+ req = DeviceRequest(**req)
+ self['DeviceRequests'].append(req)
+
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
@@ -493,6 +663,13 @@ def host_config_value_error(param, param_value):
return ValueError(error_msg.format(param, param_value))
+def host_config_incompatible_error(param, param_value, incompatible_param):
+ error_msg = '\"{1}\" {0} is incompatible with {2}'
+ return errors.InvalidArgument(
+ error_msg.format(param, param_value, incompatible_param)
+ )
+
+
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
@@ -520,17 +697,17 @@ class ContainerConfig(dict):
'version 1.29'
)
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
- if isinstance(entrypoint, six.string_types):
+ if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
+ labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
@@ -541,10 +718,10 @@ class ContainerConfig(dict):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
@@ -573,7 +750,7 @@ class ContainerConfig(dict):
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
- 'User': six.text_type(user) if user else None,
+ 'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
index ee8624e..10e8101 100644
--- a/docker/types/daemon.py
+++ b/docker/types/daemon.py
@@ -5,15 +5,17 @@ try:
except ImportError:
import urllib3
+from ..errors import DockerException
-class CancellableStream(object):
+
+class CancellableStream:
"""
Stream wrapper for real-time events, logs, etc. from the server.
Example:
>>> events = client.events()
>>> for event in events:
- ... print event
+ ... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
@@ -30,7 +32,7 @@ class CancellableStream(object):
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration
- except socket.error:
+ except OSError:
raise StopIteration
next = __next__
@@ -55,9 +57,17 @@ class CancellableStream(object):
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
+ elif hasattr(sock_fp, 'channel'):
+ # We're working with a paramiko (SSH) channel, which doesn't
+ # support cancelable streams with the current implementation
+ raise DockerException(
+ 'Cancellable streams not supported for the SSH protocol'
+ )
else:
sock = sock_fp._sock
- if isinstance(sock, urllib3.contrib.pyopenssl.WrappedSocket):
+
+ if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
+ sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
index 61857c2..dfc88a9 100644
--- a/docker/types/healthcheck.py
+++ b/docker/types/healthcheck.py
@@ -1,7 +1,5 @@
from .base import DictType
-import six
-
class Healthcheck(DictType):
"""
@@ -14,7 +12,7 @@ class Healthcheck(DictType):
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- - ``["CMD-SHELL", command]``: RUn command in the system's
+ - ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
@@ -23,15 +21,15 @@ class Healthcheck(DictType):
should be 0 or at least 1000000 (1 ms).
timeout (int): The time to wait before considering the check to
have hung. It should be 0 or at least 1000000 (1 ms).
- retries (integer): The number of consecutive failures needed to
+ retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
- start_period (integer): Start period for the container to
+ start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
"""
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
- if isinstance(test, six.string_types):
+ if isinstance(test, str):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
@@ -39,7 +37,7 @@ class Healthcheck(DictType):
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
- super(Healthcheck, self).__init__({
+ super().__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
@@ -53,6 +51,8 @@ class Healthcheck(DictType):
@test.setter
def test(self, value):
+ if isinstance(value, str):
+ value = ["CMD-SHELL", value]
self['Test'] = value
@property
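A short sketch of the string-coercion behaviour, including the setter fix above (durations are in nanoseconds, per the docstring):

    >>> from docker.types import Healthcheck
    >>> hc = Healthcheck(
    ...     test='curl -f http://localhost/ || exit 1',
    ...     interval=30 * 10**9, timeout=10 * 10**9, retries=3
    ... )
    >>> hc['Test']
    ['CMD-SHELL', 'curl -f http://localhost/ || exit 1']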
diff --git a/docker/types/networks.py b/docker/types/networks.py
index 1c7b2c9..1370dc1 100644
--- a/docker/types/networks.py
+++ b/docker/types/networks.py
@@ -4,7 +4,7 @@ from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
- ipv6_address=None, link_local_ips=None):
+ ipv6_address=None, link_local_ips=None, driver_opt=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
@@ -33,6 +33,15 @@ class EndpointConfig(dict):
if ipam_config:
self['IPAMConfig'] = ipam_config
+ if driver_opt:
+ if version_lt(version, '1.32'):
+ raise errors.InvalidVersion(
+ 'DriverOpts is not supported for API version < 1.32'
+ )
+ if not isinstance(driver_opt, dict):
+ raise TypeError('driver_opt must be a dictionary')
+ self['DriverOpts'] = driver_opt
+
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):
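A minimal sketch of the new ``driver_opt`` parameter, exercising the constructor shown above; the option key is purely illustrative, and API version 1.32+ is required:

    >>> from docker.types import EndpointConfig
    >>> cfg = EndpointConfig('1.32', driver_opt={'com.example.opt': 'value'})
    >>> cfg['DriverOpts']
    {'com.example.opt': 'value'}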
diff --git a/docker/types/services.py b/docker/types/services.py
index 31f4750..fe7cc26 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import (
@@ -26,8 +24,8 @@ class TaskTemplate(dict):
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
- networks (:py:class:`list`): List of network names or IDs to attach
- the containers to.
+        networks (:py:class:`list`): List of network names, IDs, or
+            :py:class:`NetworkAttachmentConfig` objects to attach the
+            service to.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
"""
@@ -110,16 +108,23 @@ class ContainerSpec(dict):
privileges (Privileges): Security options for the service's containers.
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
+ init (boolean): Run an init inside the container that forwards signals
+ and reaps processes.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to the
+ default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
+ the default set for the container.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
- privileges=None, isolation=None):
+ privileges=None, isolation=None, init=None, cap_add=None,
+ cap_drop=None):
self['Image'] = image
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
self['Command'] = command
self['Args'] = args
@@ -149,7 +154,7 @@ class ContainerSpec(dict):
if mounts is not None:
parsed_mounts = []
for mount in mounts:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
@@ -183,6 +188,21 @@ class ContainerSpec(dict):
if isolation is not None:
self['Isolation'] = isolation
+ if init is not None:
+ self['Init'] = init
+
+ if cap_add is not None:
+ if not isinstance(cap_add, list):
+ raise TypeError('cap_add must be a list')
+
+ self['CapabilityAdd'] = cap_add
+
+ if cap_drop is not None:
+ if not isinstance(cap_drop, list):
+ raise TypeError('cap_drop must be a list')
+
+ self['CapabilityDrop'] = cap_drop
+
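A sketch of a service spec exercising the new fields (image and capability names are illustrative; the engine expects the ``CAP_``-prefixed form, and a Swarm-capabilities-aware engine is assumed):

    >>> from docker.types import ContainerSpec, TaskTemplate
    >>> spec = ContainerSpec(
    ...     'busybox', command='sleep 3600', init=True,
    ...     cap_add=['CAP_NET_ADMIN'], cap_drop=['CAP_CHOWN']
    ... )
    >>> task_tmpl = TaskTemplate(spec)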
class Mount(dict):
"""
@@ -219,7 +239,7 @@ class Mount(dict):
self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
- 'Unsupported mount type: "{}"'.format(type)
+ f'Unsupported mount type: "{type}"'
)
self['Type'] = type
self['ReadOnly'] = read_only
@@ -255,7 +275,7 @@ class Mount(dict):
elif type == 'tmpfs':
tmpfs_opts = {}
if tmpfs_mode:
- if not isinstance(tmpfs_mode, six.integer_types):
+ if not isinstance(tmpfs_mode, int):
raise errors.InvalidArgument(
'tmpfs_mode must be an integer'
)
@@ -275,7 +295,7 @@ class Mount(dict):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
- 'Invalid mount format "{0}"'.format(string)
+ f'Invalid mount format "{string}"'
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
@@ -342,7 +362,7 @@ def _convert_generic_resources_dict(generic_resources):
' (found {})'.format(type(generic_resources))
)
resources = []
- for kind, value in six.iteritems(generic_resources):
+ for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
@@ -368,10 +388,11 @@ class UpdateConfig(dict):
parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0.
- delay (int): Amount of time between updates.
+ delay (int): Amount of time between updates, in nanoseconds.
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
- ``continue`` and ``pause``. Default: ``continue``
+ ``continue``, ``pause``, as well as ``rollback`` since API v1.28.
+ Default: ``continue``
monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
@@ -385,9 +406,9 @@ class UpdateConfig(dict):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
- if failure_action not in ('pause', 'continue'):
+ if failure_action not in ('pause', 'continue', 'rollback'):
raise errors.InvalidArgument(
- 'failure_action must be either `pause` or `continue`.'
+ 'failure_action must be one of `pause`, `continue`, `rollback`'
)
self['FailureAction'] = failure_action
@@ -413,7 +434,31 @@ class UpdateConfig(dict):
self['Order'] = order
-class RestartConditionTypesEnum(object):
+class RollbackConfig(UpdateConfig):
+ """
+    Used to specify the way container rollbacks should be performed by a
+    service.
+
+ Args:
+ parallelism (int): Maximum number of tasks to be rolled back in one
+ iteration (0 means unlimited parallelism). Default: 0
+ delay (int): Amount of time between rollbacks, in nanoseconds.
+ failure_action (string): Action to take if a rolled back task fails to
+ run, or stops running during the rollback. Acceptable values are
+ ``continue``, ``pause`` or ``rollback``.
+ Default: ``continue``
+ monitor (int): Amount of time to monitor each rolled back task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ a rollback before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
+        order (string): Specifies the order of operations when rolling back
+            a task. Either ``start_first`` or ``stop_first`` are accepted.
+ """
+ pass
+
+
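A hedged sketch of wiring both configs into the service created from the task template above; durations are nanoseconds, and the ``rollback_config`` keyword is assumed to be accepted by ``create_service`` on API v1.28+:

    >>> from docker.types import RollbackConfig, UpdateConfig
    >>> uc = UpdateConfig(parallelism=2, delay=10 * 10**9,
    ...                   failure_action='rollback')
    >>> rb = RollbackConfig(parallelism=1, failure_action='pause')
    >>> client.create_service(task_tmpl, name='demo',
    ...                       update_config=uc, rollback_config=rb)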
+class RestartConditionTypesEnum:
_values = (
'none',
'on-failure',
@@ -444,7 +489,7 @@ class RestartPolicy(dict):
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
- 'Invalid RestartPolicy condition {0}'.format(condition)
+ f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
@@ -503,7 +548,7 @@ def convert_service_ports(ports):
)
result = []
- for k, v in six.iteritems(ports):
+ for k, v in ports.items():
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
@@ -623,18 +668,28 @@ class Placement(dict):
Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args:
- constraints (:py:class:`list`): A list of constraints
- preferences (:py:class:`list`): Preferences provide a way to make
- the scheduler aware of factors such as topology. They are
- provided in order from highest to lowest precedence.
- platforms (:py:class:`list`): A list of platforms expressed as
- ``(arch, os)`` tuples
- """
- def __init__(self, constraints=None, preferences=None, platforms=None):
+ constraints (:py:class:`list` of str): A list of constraints
+ preferences (:py:class:`list` of tuple): Preferences provide a way
+ to make the scheduler aware of factors such as topology. They
+ are provided in order from highest to lowest precedence and
+ are expressed as ``(strategy, descriptor)`` tuples. See
+ :py:class:`PlacementPreference` for details.
+ maxreplicas (int): Maximum number of replicas per node
+ platforms (:py:class:`list` of tuple): A list of platforms
+ expressed as ``(arch, os)`` tuples
+ """
+ def __init__(self, constraints=None, preferences=None, platforms=None,
+ maxreplicas=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
- self['Preferences'] = preferences
+ self['Preferences'] = []
+ for pref in preferences:
+ if isinstance(pref, tuple):
+ pref = PlacementPreference(*pref)
+ self['Preferences'].append(pref)
+ if maxreplicas is not None:
+ self['MaxReplicas'] = maxreplicas
if platforms:
self['Platforms'] = []
for plat in platforms:
@@ -643,6 +698,27 @@ class Placement(dict):
})
+class PlacementPreference(dict):
+ """
+ Placement preference to be used as an element in the list of
+ preferences for :py:class:`Placement` objects.
+
+ Args:
+ strategy (string): The placement strategy to implement. Currently,
+ the only supported strategy is ``spread``.
+ descriptor (string): A label descriptor. For the spread strategy,
+ the scheduler will try to spread tasks evenly over groups of
+ nodes identified by this label.
+ """
+ def __init__(self, strategy, descriptor):
+ if strategy != 'spread':
+ raise errors.InvalidArgument(
+ 'PlacementPreference strategy value is invalid ({}):'
+ ' must be "spread".'.format(strategy)
+ )
+ self['Spread'] = {'SpreadDescriptor': descriptor}
+
+
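As the constructor above shows, a ``(strategy, descriptor)`` tuple in ``preferences`` is now promoted to a ``PlacementPreference`` automatically; the constraint and label values here are illustrative:

    >>> from docker.types import Placement
    >>> p = Placement(
    ...     constraints=['node.role == worker'],
    ...     preferences=[('spread', 'node.labels.datacenter')],
    ...     maxreplicas=2
    ... )
    >>> p['Preferences']
    [{'Spread': {'SpreadDescriptor': 'node.labels.datacenter'}}]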
class DNSConfig(dict):
"""
Specification for DNS related configurations in resolver configuration
@@ -662,7 +738,7 @@ class DNSConfig(dict):
class Privileges(dict):
- """
+ r"""
Security options for a service's containers.
Part of a :py:class:`ContainerSpec` definition.
@@ -713,3 +789,21 @@ class Privileges(dict):
if len(selinux_context) > 0:
self['SELinuxContext'] = selinux_context
+
+
+class NetworkAttachmentConfig(dict):
+ """
+ Network attachment options for a service.
+
+ Args:
+ target (str): The target network for attachment.
+ Can be a network name or ID.
+ aliases (:py:class:`list`): A list of discoverable alternate names
+ for the service.
+ options (:py:class:`dict`): Driver attachment options for the
+ network target.
+ """
+ def __init__(self, target, aliases=None, options=None):
+ self['Target'] = target
+ self['Aliases'] = aliases
+ self['DriverOpts'] = options
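A brief sketch of attaching a service to a network with per-attachment options, reusing the ContainerSpec sketch above (network name, alias, and option key are illustrative):

    >>> from docker.types import NetworkAttachmentConfig, TaskTemplate
    >>> net = NetworkAttachmentConfig('my_network', aliases=['db'],
    ...                               options={'com.example.opt': 'value'})
    >>> task_tmpl = TaskTemplate(spec, networks=[net])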
diff --git a/docker/utils/build.py b/docker/utils/build.py
index 4fa5751..ac06043 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -4,8 +4,6 @@ import re
import tarfile
import tempfile
-import six
-
from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
@@ -69,7 +67,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
- extra_names = set(e[0] for e in extra_files)
+ extra_names = {e[0] for e in extra_files}
for path in files:
if path in extra_names:
# Extra files override context files with the same name
@@ -95,9 +93,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
- except IOError:
- raise IOError(
- 'Can not read file in context: {}'.format(full_path)
+ except OSError:
+ raise OSError(
+ f'Can not read file in context: {full_path}'
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
@@ -105,8 +103,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
for name, contents in extra_files:
info = tarfile.TarInfo(name)
- info.size = len(contents)
- t.addfile(info, io.BytesIO(contents.encode('utf-8')))
+ contents_encoded = contents.encode('utf-8')
+ info.size = len(contents_encoded)
+ t.addfile(info, io.BytesIO(contents_encoded))
t.close()
fileobj.seek(0)
@@ -118,12 +117,8 @@ def mkbuildcontext(dockerfile):
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
- if six.PY3:
- raise TypeError('Please use io.BytesIO to create in-memory '
- 'Dockerfiles with Python 3')
- else:
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
@@ -153,7 +148,7 @@ def walk(root, patterns, default=True):
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
-class PatternMatcher(object):
+class PatternMatcher:
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
@@ -211,13 +206,12 @@ class PatternMatcher(object):
break
if skip:
continue
- for sub in rec_walk(cur):
- yield sub
+ yield from rec_walk(cur)
return rec_walk(root)
-class Pattern(object):
+class Pattern:
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
diff --git a/docker/utils/config.py b/docker/utils/config.py
index 82a0e2a..8e24959 100644
--- a/docker/utils/config.py
+++ b/docker/utils/config.py
@@ -18,11 +18,11 @@ def find_config_file(config_path=None):
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
- log.debug("Trying paths: {0}".format(repr(paths)))
+ log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
- log.debug("Found file at path: {0}".format(path))
+ log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
@@ -57,7 +57,7 @@ def load_general_config(config_path=None):
try:
with open(config_file) as f:
return json.load(f)
- except (IOError, ValueError) as e:
+ except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index c975d4b..cf1baf4 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -27,7 +27,7 @@ def minimum_version(version):
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
- '{0} is not available for version < {1}'.format(
+ '{} is not available for version < {}'.format(
f.__name__, version
)
)
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
index cc940a2..90e9f60 100644
--- a/docker/utils/fnmatch.py
+++ b/docker/utils/fnmatch.py
@@ -108,7 +108,7 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
+ res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
index addffdf..f384175 100644
--- a/docker/utils/json_stream.py
+++ b/docker/utils/json_stream.py
@@ -1,11 +1,6 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
import json
import json.decoder
-import six
-
from ..errors import StreamParseError
@@ -20,7 +15,7 @@ def stream_as_text(stream):
instead of byte streams.
"""
for data in stream:
- if not isinstance(data, six.text_type):
+ if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
@@ -46,8 +41,8 @@ def json_stream(stream):
return split_buffer(stream, json_splitter, json_decoder.decode)
-def line_splitter(buffer, separator=u'\n'):
- index = buffer.find(six.text_type(separator))
+def line_splitter(buffer, separator='\n'):
+ index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
@@ -61,7 +56,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
of the input.
"""
splitter = splitter or line_splitter
- buffered = six.text_type('')
+ buffered = ''
for data in stream_as_text(stream):
buffered += data
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index bf7d697..e813936 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -3,11 +3,11 @@ import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
- "((?P<host>[a-fA-F\d.:]+):)?" # Address
- "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
- "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
- "(?P<proto>/(udp|tcp))?" # Protocol
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
@@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
if not end:
return [start + proto]
if randomly_available_port:
- return ['{}-{}'.format(start, end) + proto]
+ return [f'{start}-{end}' + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
diff --git a/docker/utils/proxy.py b/docker/utils/proxy.py
new file mode 100644
index 0000000..49e98ed
--- /dev/null
+++ b/docker/utils/proxy.py
@@ -0,0 +1,73 @@
+from .utils import format_environment
+
+
+class ProxyConfig(dict):
+ '''
+ Hold the client's proxy configuration
+ '''
+ @property
+ def http(self):
+ return self.get('http')
+
+ @property
+ def https(self):
+ return self.get('https')
+
+ @property
+ def ftp(self):
+ return self.get('ftp')
+
+ @property
+ def no_proxy(self):
+ return self.get('no_proxy')
+
+ @staticmethod
+ def from_dict(config):
+ '''
+ Instantiate a new ProxyConfig from a dictionary that represents a
+ client configuration, as described in `the documentation`_.
+
+ .. _the documentation:
+ https://docs.docker.com/network/proxy/#configure-the-docker-client
+ '''
+ return ProxyConfig(
+ http=config.get('httpProxy'),
+ https=config.get('httpsProxy'),
+ ftp=config.get('ftpProxy'),
+ no_proxy=config.get('noProxy'),
+ )
+
+ def get_environment(self):
+ '''
+ Return a dictionary representing the environment variables used to
+ set the proxy settings.
+ '''
+ env = {}
+ if self.http:
+ env['http_proxy'] = env['HTTP_PROXY'] = self.http
+ if self.https:
+ env['https_proxy'] = env['HTTPS_PROXY'] = self.https
+ if self.ftp:
+ env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
+ if self.no_proxy:
+ env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
+ return env
+
+ def inject_proxy_environment(self, environment):
+ '''
+ Given a list of strings representing environment variables, prepend the
+ environment variables corresponding to the proxy settings.
+ '''
+ if not self:
+ return environment
+
+ proxy_env = format_environment(self.get_environment())
+ if not environment:
+ return proxy_env
+ # It is important to prepend our variables, because we want the
+ # variables defined in "environment" to take precedence.
+ return proxy_env + environment
+
+ def __str__(self):
+ return 'ProxyConfig(http={}, https={}, ftp={}, no_proxy={})'.format(
+ self.http, self.https, self.ftp, self.no_proxy)
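Taken together, a rough sketch of the new type in action (addresses are illustrative); the proxy variables are prepended so that caller-supplied values take precedence:

    >>> from docker.utils.proxy import ProxyConfig
    >>> cfg = ProxyConfig.from_dict({
    ...     'httpProxy': 'http://proxy.example.com:3128',
    ...     'noProxy': 'localhost,127.0.0.1',
    ... })
    >>> cfg.inject_proxy_environment(['FOO=bar'])
    ['http_proxy=http://proxy.example.com:3128',
     'HTTP_PROXY=http://proxy.example.com:3128',
     'no_proxy=localhost,127.0.0.1',
     'NO_PROXY=localhost,127.0.0.1',
     'FOO=bar']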
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 7b96d4f..4a2076e 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -4,14 +4,16 @@ import select
import socket as pysocket
import struct
-import six
-
try:
from ..transport import NpipeSocket
except ImportError:
NpipeSocket = type(None)
+STDOUT = 1
+STDERR = 2
+
+
class SocketError(Exception):
pass
@@ -23,16 +25,16 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
- if six.PY3 and not isinstance(socket, NpipeSocket):
+ if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
- if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ if isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
- except EnvironmentError as e:
+ except OSError as e:
if e.errno not in recoverable_errors:
raise
@@ -42,7 +44,7 @@ def read_exactly(socket, n):
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
- data = six.binary_type()
+ data = bytes()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
@@ -51,28 +53,43 @@ def read_exactly(socket, n):
return data
-def next_frame_size(socket):
+def next_frame_header(socket):
"""
- Returns the size of the next frame of data waiting to be read from socket,
- according to the protocol defined here:
+ Returns the stream and size of the next frame of data waiting to be read
+ from socket, according to the protocol defined here:
- https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
+ https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
- return -1
+ return (-1, -1)
+
+ stream, actual = struct.unpack('>BxxxL', data)
+ return (stream, actual)
- _, actual = struct.unpack('>BxxxL', data)
- return actual
+
+def frames_iter(socket, tty):
+ """
+ Return a generator of frames read from socket. A frame is a tuple where
+ the first item is the stream number and the second item is a chunk of data.
+
+ If the tty setting is enabled, the streams are multiplexed into the stdout
+ stream.
+ """
+ if tty:
+ return ((STDOUT, frame) for frame in frames_iter_tty(socket))
+ else:
+ return frames_iter_no_tty(socket)
-def frames_iter(socket):
+def frames_iter_no_tty(socket):
"""
- Returns a generator of frames read from socket
+ Returns a generator of data read from the socket when the tty setting is
+ not enabled.
"""
while True:
- n = next_frame_size(socket)
+ (stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
@@ -84,13 +101,13 @@ def frames_iter(socket):
# We have reached EOF
return
n -= data_length
- yield result
+ yield (stream, result)
-def socket_raw_iter(socket):
+def frames_iter_tty(socket):
"""
- Returns a generator of data read from the socket.
- This is used for non-multiplexed streams.
+ Return a generator of data read from the socket when the tty setting is
+ enabled.
"""
while True:
result = read(socket)
@@ -98,3 +115,53 @@ def socket_raw_iter(socket):
# We have reached EOF
return
yield result
+
+
+def consume_socket_output(frames, demux=False):
+ """
+ Iterate through frames read from the socket and return the result.
+
+ Args:
+
+        frames: An iterable of frames, as produced by ``frames_iter``;
+            when ``demux`` is True, each frame must already be a
+            ``(stdout, stderr)`` tuple (see ``demux_adaptor``).
+        demux (bool):
+ If False, stdout and stderr are multiplexed, and the result is the
+ concatenation of all the frames. If True, the streams are
+ demultiplexed, and the result is a 2-tuple where each item is the
+ concatenation of frames belonging to the same stream.
+ """
+ if demux is False:
+    # If the streams are multiplexed, the generator yields byte strings
+    # that we just need to concatenate.
+ return bytes().join(frames)
+
+ # If the streams are demultiplexed, the generator yields tuples
+ # (stdout, stderr)
+ out = [None, None]
+ for frame in frames:
+ # It is guaranteed that for each frame, one and only one stream
+ # is not None.
+ assert frame != (None, None)
+ if frame[0] is not None:
+ if out[0] is None:
+ out[0] = frame[0]
+ else:
+ out[0] += frame[0]
+ else:
+ if out[1] is None:
+ out[1] = frame[1]
+ else:
+ out[1] += frame[1]
+ return tuple(out)
+
+
+def demux_adaptor(stream_id, data):
+ """
+ Utility to demultiplex stdout and stderr when reading frames from the
+ socket.
+ """
+ if stream_id == STDOUT:
+ return (data, None)
+ elif stream_id == STDERR:
+ return (None, data)
+ else:
+ raise ValueError(f'{stream_id} is not a valid stream')
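A sketch of how these helpers compose when reading an attached container's output; ``sock`` stands in for an attach socket with tty disabled:

    >>> frames = frames_iter(sock, tty=False)
    >>> frames = (demux_adaptor(*frame) for frame in frames)
    >>> stdout, stderr = consume_socket_output(frames, demux=True)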
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index fe3b9a5..f7c3dd7 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,31 +1,20 @@
import base64
+import json
import os
import os.path
-import json
import shlex
-from distutils.version import StrictVersion
+import string
from datetime import datetime
-
-import six
+from distutils.version import StrictVersion
from .. import errors
from .. import tls
+from ..constants import DEFAULT_HTTP_HOST
+from ..constants import DEFAULT_UNIX_SOCKET
+from ..constants import DEFAULT_NPIPE
+from ..constants import BYTE_UNITS
-if six.PY2:
- from urllib import splitnport
-else:
- from urllib.parse import splitnport
-
-DEFAULT_HTTP_HOST = "127.0.0.1"
-DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
-DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
-
-BYTE_UNITS = {
- 'b': 1,
- 'k': 1024,
- 'm': 1024 * 1024,
- 'g': 1024 * 1024 * 1024
-}
+from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
@@ -44,8 +33,7 @@ def create_ipam_config(*args, **kwargs):
def decode_json_header(header):
data = base64.b64decode(header)
- if six.PY3:
- data = data.decode('utf-8')
+ data = data.decode('utf-8')
return json.loads(data)
@@ -85,7 +73,7 @@ def _convert_port_binding(binding):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
- elif isinstance(binding[0], six.string_types):
+ elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
@@ -109,7 +97,7 @@ def _convert_port_binding(binding):
def convert_port_bindings(port_bindings):
result = {}
- for k, v in six.iteritems(port_bindings):
+ for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
@@ -126,7 +114,7 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
- if isinstance(k, six.binary_type):
+ if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
@@ -137,7 +125,7 @@ def convert_volume_binds(binds):
)
bind = v['bind']
- if isinstance(bind, six.binary_type):
+ if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
@@ -148,13 +136,13 @@ def convert_volume_binds(binds):
mode = 'rw'
result.append(
- six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+ f'{k}:{bind}:{mode}'
)
else:
- if isinstance(v, six.binary_type):
+ if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
- six.text_type('{0}:{1}:rw').format(k, v)
+ f'{k}:{v}:rw'
)
return result
@@ -171,7 +159,7 @@ def convert_tmpfs_mounts(tmpfs):
result = {}
for mount in tmpfs:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
@@ -196,7 +184,7 @@ def convert_service_networks(networks):
result = []
for n in networks:
- if isinstance(n, six.string_types):
+ if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
@@ -212,75 +200,93 @@ def parse_repository_tag(repo_name):
return repo_name, None
-# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
-# fd:// protocol unsupported (for obvious reasons)
-# Added support for http and https
-# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
- proto = "http+unix"
- port = None
path = ''
+ port = None
+ host = None
+ # Sensible defaults
if not addr and is_win32:
- addr = DEFAULT_NPIPE
-
+ return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
- if addr.startswith('http://'):
- addr = addr.replace('http://', 'tcp://')
- if addr.startswith('http+unix://'):
- addr = addr.replace('http+unix://', 'unix://')
- if addr == 'tcp://':
+ parsed_url = urlparse(addr)
+ proto = parsed_url.scheme
+ if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
+ # https://bugs.python.org/issue754016
+ parsed_url = urlparse('//' + addr, 'tcp')
+ proto = 'tcp'
+
+ if proto == 'fd':
+ raise errors.DockerException('fd protocol is not implemented')
+
+ # These protos are valid aliases for our library but not for the
+ # official spec
+ if proto == 'http' or proto == 'https':
+ tls = proto == 'https'
+ proto = 'tcp'
+ elif proto == 'http+unix':
+ proto = 'unix'
+
+ if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
- "Invalid bind address format: {0}".format(addr)
+ f"Invalid bind address protocol: {addr}"
)
- elif addr.startswith('unix://'):
- addr = addr[7:]
- elif addr.startswith('tcp://'):
- proto = 'http{0}'.format('s' if tls else '')
- addr = addr[6:]
- elif addr.startswith('https://'):
- proto = "https"
- addr = addr[8:]
- elif addr.startswith('npipe://'):
- proto = 'npipe'
- addr = addr[8:]
- elif addr.startswith('fd://'):
- raise errors.DockerException("fd protocol is not implemented")
- else:
- if "://" in addr:
- raise errors.DockerException(
- "Invalid bind address protocol: {0}".format(addr)
- )
- proto = "https" if tls else "http"
- if proto in ("http", "https"):
- address_parts = addr.split('/', 1)
- host = address_parts[0]
- if len(address_parts) == 2:
- path = '/' + address_parts[1]
- host, port = splitnport(host)
+ if proto == 'tcp' and not parsed_url.netloc:
+ # "tcp://" is exceptionally disallowed by convention;
+ # omitting a hostname for other protocols is fine
+ raise errors.DockerException(
+ f'Invalid bind address format: {addr}'
+ )
- if port is None:
- raise errors.DockerException(
- "Invalid port: {0}".format(addr)
- )
+ if any([
+ parsed_url.params, parsed_url.query, parsed_url.fragment,
+ parsed_url.password
+ ]):
+ raise errors.DockerException(
+ f'Invalid bind address format: {addr}'
+ )
+
+ if parsed_url.path and proto == 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: no path allowed for this protocol:'
+ ' {}'.format(addr)
+ )
+ else:
+ path = parsed_url.path
+ if proto == 'unix' and parsed_url.hostname is not None:
+ # For legacy reasons, we consider unix://path
+ # to be valid and equivalent to unix:///path
+ path = '/'.join((parsed_url.hostname, path))
+
+ if proto in ('tcp', 'ssh'):
+ # parsed_url.hostname strips brackets from IPv6 addresses,
+ # which can be problematic hence our use of splitnport() instead.
+ host, port = splitnport(parsed_url.netloc)
+ if port is None or port < 0:
+ if proto != 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: port is required:'
+ ' {}'.format(addr)
+ )
+ port = 22
if not host:
host = DEFAULT_HTTP_HOST
- else:
- host = addr
- if proto in ("http", "https") and port == -1:
- raise errors.DockerException(
- "Bind address needs a port: {0}".format(addr))
+ # Rewrite schemes to fit library internals (requests adapters)
+ if proto == 'tcp':
+ proto = 'http{}'.format('s' if tls else '')
+ elif proto == 'unix':
+ proto = 'http+unix'
- if proto == "http+unix" or proto == 'npipe':
- return "{0}://{1}".format(proto, host).rstrip('/')
- return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
+ if proto in ('http+unix', 'npipe'):
+ return f"{proto}://{path}".rstrip('/')
+ return f'{proto}://{host}:{port}{path}'.rstrip('/')
def parse_devices(devices):
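A doctest-style sketch of what the rewritten parser should produce for a few representative addresses:

    >>> parse_host('tcp://127.0.0.1:2376', tls=True)
    'https://127.0.0.1:2376'
    >>> parse_host('unix:///var/run/docker.sock')
    'http+unix:///var/run/docker.sock'
    >>> parse_host('ssh://user@remote')
    'ssh://user@remote:22'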
@@ -289,9 +295,9 @@ def parse_devices(devices):
if isinstance(device, dict):
device_list.append(device)
continue
- if not isinstance(device, six.string_types):
+ if not isinstance(device, str):
raise errors.DockerException(
- 'Invalid device type {0}'.format(type(device))
+ f'Invalid device type {type(device)}'
)
device_mapping = device.split(':')
if device_mapping:
@@ -332,9 +338,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
params = {}
if host:
- params['base_url'] = (
- host.replace('tcp://', 'https://') if enable_tls else host
- )
+ params['base_url'] = host
if not enable_tls:
return params
@@ -361,12 +365,15 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
def convert_filters(filters):
result = {}
- for k, v in six.iteritems(filters):
+ for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
- result[k] = v
+ result[k] = [
+ str(item) if not isinstance(item, str) else item
+ for item in v
+ ]
return json.dumps(result)
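For instance, non-string filter values are now coerced before serialization (key names illustrative):

    >>> convert_filters({'dangling': True, 'exit-code': 0})
    '{"dangling": ["true"], "exit-code": ["0"]}'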
@@ -377,7 +384,7 @@ def datetime_to_timestamp(dt):
def parse_bytes(s):
- if isinstance(s, six.integer_types + (float,)):
+ if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
@@ -398,10 +405,10 @@ def parse_bytes(s):
if suffix in units.keys() or suffix.isdigit():
try:
- digits = int(digits_part)
+ digits = float(digits_part)
except ValueError:
raise errors.DockerException(
- 'Failed converting the string value for memory ({0}) to'
+ 'Failed converting the string value for memory ({}) to'
' an integer.'.format(digits_part)
)
@@ -409,7 +416,7 @@ def parse_bytes(s):
s = int(digits * units[suffix])
else:
raise errors.DockerException(
- 'The specified value for memory ({0}) should specify the'
+ 'The specified value for memory ({}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
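With the switch from ``int()`` to ``float()``, fractional values now parse; for example:

    >>> parse_bytes('1.5g')
    1610612736
    >>> parse_bytes('512m')
    536870912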
@@ -419,9 +426,9 @@ def parse_bytes(s):
def normalize_links(links):
if isinstance(links, dict):
- links = six.iteritems(links)
+ links = iter(links.items())
- return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
+ return [f'{k}:{v}' if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
@@ -431,7 +438,7 @@ def parse_env_file(env_file):
"""
environment = {}
- with open(env_file, 'r') as f:
+ with open(env_file) as f:
for line in f:
if line[0] == '#':
@@ -447,15 +454,13 @@ def parse_env_file(env_file):
environment[k] = v
else:
raise errors.DockerException(
- 'Invalid line in environment file {0}:\n{1}'.format(
+ 'Invalid line in environment file {}:\n{}'.format(
env_file, line))
return environment
def split_command(command):
- if six.PY2 and not isinstance(command, six.binary_type):
- command = command.encode('utf-8')
return shlex.split(command)
@@ -463,22 +468,22 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
- if isinstance(value, six.binary_type):
+ if isinstance(value, bytes):
value = value.decode('utf-8')
- return u'{key}={value}'.format(key=key, value=value)
- return [format_env(*var) for var in six.iteritems(environment)]
+ return f'{key}={value}'
+ return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
- '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
]
return [
- '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
]
diff --git a/docker/version.py b/docker/version.py
index d451374..4259432 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "3.4.1"
-version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
+version = "5.0.3"
+version_info = tuple(int(d) for d in version.split("-")[0].split("."))