Diffstat (limited to 'docker')
-rw-r--r--  docker/api/build.py | 12
-rw-r--r--  docker/api/client.py | 44
-rw-r--r--  docker/api/config.py | 15
-rw-r--r--  docker/api/container.py | 58
-rw-r--r--  docker/api/daemon.py | 2
-rw-r--r--  docker/api/exec_api.py | 6
-rw-r--r--  docker/api/image.py | 27
-rw-r--r--  docker/api/network.py | 6
-rw-r--r--  docker/api/plugin.py | 6
-rw-r--r--  docker/api/secret.py | 7
-rw-r--r--  docker/api/service.py | 4
-rw-r--r--  docker/api/swarm.py | 14
-rw-r--r--  docker/api/volume.py | 8
-rw-r--r--  docker/auth.py | 38
-rw-r--r--  docker/client.py | 4
-rw-r--r--  docker/constants.py | 4
-rw-r--r--  docker/context/api.py | 6
-rw-r--r--  docker/context/config.py | 4
-rw-r--r--  docker/context/context.py | 11
-rw-r--r--  docker/credentials/store.py | 27
-rw-r--r--  docker/errors.py | 26
-rw-r--r--  docker/models/configs.py | 2
-rw-r--r--  docker/models/containers.py | 8
-rw-r--r--  docker/models/images.py | 18
-rw-r--r--  docker/models/plugins.py | 5
-rw-r--r--  docker/models/resource.py | 9
-rw-r--r--  docker/models/secrets.py | 3
-rw-r--r--  docker/models/services.py | 6
-rw-r--r--  docker/models/swarm.py | 2
-rw-r--r--  docker/tls.py | 2
-rw-r--r--  docker/transport/basehttpadapter.py | 2
-rw-r--r--  docker/transport/npipeconn.py | 17
-rw-r--r--  docker/transport/npipesocket.py | 8
-rw-r--r--  docker/transport/sshconn.py | 38
-rw-r--r--  docker/transport/ssladapter.py | 4
-rw-r--r--  docker/transport/unixconn.py | 32
-rw-r--r--  docker/types/base.py | 5
-rw-r--r--  docker/types/containers.py | 42
-rw-r--r--  docker/types/daemon.py | 4
-rw-r--r--  docker/types/healthcheck.py | 8
-rw-r--r--  docker/types/services.py | 39
-rw-r--r--  docker/utils/build.py | 25
-rw-r--r--  docker/utils/config.py | 6
-rw-r--r--  docker/utils/decorators.py | 2
-rw-r--r--  docker/utils/fnmatch.py | 2
-rw-r--r--  docker/utils/json_stream.py | 13
-rw-r--r--  docker/utils/ports.py | 4
-rw-r--r--  docker/utils/socket.py | 14
-rw-r--r--  docker/utils/utils.py | 73
-rw-r--r--  docker/version.py | 4
50 files changed, 341 insertions, 385 deletions
diff --git a/docker/api/build.py b/docker/api/build.py
index 365129a..aac43c4 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -12,7 +12,7 @@ from .. import utils
log = logging.getLogger(__name__)
-class BuildApiMixin(object):
+class BuildApiMixin:
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
@@ -132,7 +132,7 @@ class BuildApiMixin(object):
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
- 'Invalid container_limits key {0}'.format(key)
+ f'Invalid container_limits key {key}'
)
if custom_context:
@@ -150,7 +150,7 @@ class BuildApiMixin(object):
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
- with open(dockerignore, 'r') as f:
+ with open(dockerignore) as f:
exclude = list(filter(
lambda x: x != '' and x[0] != '#',
[l.strip() for l in f.read().splitlines()]
@@ -313,7 +313,7 @@ class BuildApiMixin(object):
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
- 'Sending auth config ({0})'.format(
+ 'Sending auth config ({})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
@@ -344,9 +344,9 @@ def process_dockerfile(dockerfile, path):
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
- with open(abs_dockerfile, 'r') as df:
+ with open(abs_dockerfile) as df:
return (
- '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+ f'.dockerfile.{random.getrandbits(160):x}',
df.read()
)
diff --git a/docker/api/client.py b/docker/api/client.py
index 2b67291..2667922 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -1,10 +1,10 @@
import json
import struct
+import urllib
from functools import partial
import requests
import requests.exceptions
-import six
import websocket
from .. import auth
@@ -107,7 +107,7 @@ class APIClient(
user_agent=DEFAULT_USER_AGENT, num_pools=None,
credstore_env=None, use_ssh_client=False,
max_pool_size=DEFAULT_MAX_POOL_SIZE):
- super(APIClient, self).__init__()
+ super().__init__()
if tls and not base_url:
raise TLSParameterError(
@@ -192,14 +192,14 @@ class APIClient(
# version detection needs to be after unix adapter mounting
if version is None or (isinstance(
version,
- six.string_types
+ str
) and version.lower() == 'auto'):
self._version = self._retrieve_server_version()
else:
self._version = version
- if not isinstance(self._version, six.string_types):
+ if not isinstance(self._version, str):
raise DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
+ 'Version parameter must be a string or None. Found {}'.format(
type(version).__name__
)
)
@@ -219,7 +219,7 @@ class APIClient(
)
except Exception as e:
raise DockerException(
- 'Error while fetching server API version: {0}'.format(e)
+ f'Error while fetching server API version: {e}'
)
def _set_request_timeout(self, kwargs):
@@ -246,21 +246,21 @@ class APIClient(
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
- if not isinstance(arg, six.string_types):
+ if not isinstance(arg, str):
raise ValueError(
- 'Expected a string but found {0} ({1}) '
+ 'Expected a string but found {} ({}) '
'instead'.format(arg, type(arg))
)
- quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+ quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
+ return '{}/v{}{}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+ return f'{self.base_url}{pathfmt.format(*args)}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
@@ -284,7 +284,7 @@ class APIClient(
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
- for k, v in six.iteritems(data):
+ for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
@@ -320,12 +320,10 @@ class APIClient(
sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'):
sock = response.raw._fp.fp.channel
- elif six.PY3:
+ else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
@@ -343,8 +341,7 @@ class APIClient(
if response.raw._fp.chunked:
if decode:
- for chunk in json_stream(self._stream_helper(response, False)):
- yield chunk
+ yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
@@ -400,8 +397,13 @@ class APIClient(
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
- for out in response.iter_content(chunk_size, decode):
- yield out
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ yield from response.iter_content(chunk_size, decode)
def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
@@ -465,7 +467,7 @@ class APIClient(
self._result(res, binary=True)
self._raise_for_status(res)
- sep = six.binary_type()
+ sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
@@ -479,7 +481,7 @@ class APIClient(
def get_adapter(self, url):
try:
- return super(APIClient, self).get_adapter(url)
+ return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
diff --git a/docker/api/config.py b/docker/api/config.py
index 93e5168..88c367e 100644
--- a/docker/api/config.py
+++ b/docker/api/config.py
@@ -1,13 +1,11 @@
import base64
-import six
-
from .. import utils
-class ConfigApiMixin(object):
+class ConfigApiMixin:
@utils.minimum_version('1.30')
- def create_config(self, name, data, labels=None):
+ def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
@@ -15,6 +13,9 @@ class ConfigApiMixin(object):
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
+ templating (dict): dictionary containing the name of the
+ templating driver to be used expressed as
+ { name: <templating_driver_name>}
Returns (dict): ID of the newly created config
"""
@@ -22,12 +23,12 @@ class ConfigApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
- 'Labels': labels
+ 'Labels': labels,
+ 'Templating': templating
}
url = self._url('/configs/create')
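
Note: a minimal usage sketch for the new ``templating`` argument added above; this is an editor-provided illustration, not part of the patch, and the driver name and config payload are made up:

    import docker

    client = docker.from_env()

    # Requires API version 1.30+ (per the decorator above); 'golang' is an
    # illustrative templating driver name.
    resp = client.api.create_config(
        name='app-config',
        data=b'listen_port: {{ env "PORT" }}',
        labels={'app': 'web'},
        templating={'name': 'golang'},
    )
    print(resp['ID'])  # the API returns the ID of the newly created config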
diff --git a/docker/api/container.py b/docker/api/container.py
index 754b5dc..83fcd4f 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,7 +1,5 @@
from datetime import datetime
-import six
-
from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
@@ -12,7 +10,7 @@ from ..types import HostConfig
from ..types import NetworkingConfig
-class ContainerApiMixin(object):
+class ContainerApiMixin:
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False, demux=False):
@@ -244,9 +242,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[1111, 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
1111: 4567,
2222: None
})
@@ -258,22 +256,22 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
Or without host port assignment:
.. code-block:: python
- cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+ client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config:
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
- host_config=cli.create_host_config(port_bindings={
+ host_config=client.api.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None
})
)
@@ -283,7 +281,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [1234, 4567]
})
@@ -291,7 +289,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- cli.create_host_config(port_bindings={
+ client.api.create_host_config(port_bindings={
1111: [
('192.168.0.100', 1234),
('192.168.0.101', 1234)
@@ -307,9 +305,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds={
+ host_config=client.api.create_host_config(binds={
'/home/user1/': {
'bind': '/mnt/vol2',
'mode': 'rw',
@@ -326,9 +324,9 @@ class ContainerApiMixin(object):
.. code-block:: python
- container_id = cli.create_container(
+ container_id = client.api.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds=[
+ host_config=client.api.create_host_config(binds=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
])
@@ -346,15 +344,15 @@ class ContainerApiMixin(object):
.. code-block:: python
- networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config(
+ networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config(
ipv4_address='172.28.0.124',
aliases=['foo', 'bar'],
links=['container2']
)
})
- ctnr = docker_client.create_container(
+ ctnr = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -408,7 +406,7 @@ class ContainerApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(environment, dict):
@@ -581,7 +579,7 @@ class ContainerApiMixin(object):
Example:
- >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ >>> client.api.create_host_config(privileged=True, cap_drop=['MKNOD'],
volumes_from=['nostalgic_newton'])
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
@@ -612,11 +610,11 @@ class ContainerApiMixin(object):
Example:
- >>> docker_client.create_network('network1')
- >>> networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config()
+ >>> client.api.create_network('network1')
+ >>> networking_config = client.api.create_networking_config({
+ 'network1': client.api.create_endpoint_config()
})
- >>> container = docker_client.create_container(
+ >>> container = client.api.create_container(
img, command, networking_config=networking_config
)
@@ -650,7 +648,7 @@ class ContainerApiMixin(object):
Example:
- >>> endpoint_config = client.create_endpoint_config(
+ >>> endpoint_config = client.api.create_endpoint_config(
aliases=['web', 'app'],
links={'app_db': 'db', 'another': None},
ipv4_address='132.65.0.123'
@@ -729,7 +727,7 @@ class ContainerApiMixin(object):
>>> c = docker.APIClient()
>>> f = open('./sh_bin.tar', 'wb')
- >>> bits, stat = c.get_archive(container, '/bin/sh')
+ >>> bits, stat = c.api.get_archive(container, '/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
@@ -790,7 +788,7 @@ class ContainerApiMixin(object):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
- if not isinstance(signal, six.string_types):
+ if not isinstance(signal, str):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
@@ -916,7 +914,7 @@ class ContainerApiMixin(object):
.. code-block:: python
- >>> cli.port('7174d6347063', 80)
+ >>> client.api.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
"""
res = self._get(self._url("/containers/{0}/json", container))
@@ -1095,10 +1093,10 @@ class ContainerApiMixin(object):
Example:
- >>> container = cli.create_container(
+ >>> container = client.api.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
- >>> cli.start(container=container.get('Id'))
+ >>> client.api.start(container=container.get('Id'))
"""
if args or kwargs:
raise errors.DeprecatedMethod(
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 6b71926..a857213 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils
-class DaemonApiMixin(object):
+class DaemonApiMixin:
@utils.minimum_version('1.25')
def df(self):
"""
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 4c49ac3..496308a 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -1,10 +1,8 @@
-import six
-
from .. import errors
from .. import utils
-class ExecApiMixin(object):
+class ExecApiMixin:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
@@ -45,7 +43,7 @@ class ExecApiMixin(object):
'Setting environment for exec is not supported in API < 1.25'
)
- if isinstance(cmd, six.string_types):
+ if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
diff --git a/docker/api/image.py b/docker/api/image.py
index 56c5448..772d889 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,15 +1,13 @@
import logging
import os
-import six
-
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
-class ImageApiMixin(object):
+class ImageApiMixin:
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@@ -31,7 +29,7 @@ class ImageApiMixin(object):
Example:
- >>> image = cli.get_image("busybox:latest")
+ >>> image = client.api.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
>>> f.write(chunk)
@@ -130,7 +128,7 @@ class ImageApiMixin(object):
params = _import_image_params(
repository, tag, image,
- src=(src if isinstance(src, six.string_types) else None),
+ src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
@@ -139,7 +137,7 @@ class ImageApiMixin(object):
return self._result(
self._post(u, data=None, params=params)
)
- elif isinstance(src, six.string_types): # from file path
+ elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
@@ -379,7 +377,7 @@ class ImageApiMixin(object):
Example:
- >>> for line in cli.pull('busybox', stream=True, decode=True):
+ >>> for line in client.api.pull('busybox', stream=True, decode=True):
... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
@@ -458,7 +456,7 @@ class ImageApiMixin(object):
If the server returns an error.
Example:
- >>> for line in cli.push('yourname/app', stream=True, decode=True):
+ >>> for line in client.api.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
@@ -509,13 +507,14 @@ class ImageApiMixin(object):
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
- def search(self, term):
+ def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
+ limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
@@ -524,8 +523,12 @@ class ImageApiMixin(object):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
+ params = {'term': term}
+ if limit is not None:
+ params['limit'] = limit
+
return self._result(
- self._get(self._url("/images/search"), params={'term': term}),
+ self._get(self._url("/images/search"), params=params),
True
)
@@ -549,7 +552,7 @@ class ImageApiMixin(object):
Example:
- >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
@@ -566,7 +569,7 @@ class ImageApiMixin(object):
def is_file(src):
try:
return (
- isinstance(src, six.string_types) and
+ isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
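
Note: a short sketch of the new ``limit`` parameter for ``search()``; an editor-added illustration, not part of the patch:

    import docker

    client = docker.from_env()

    # Cap the number of Docker Hub search results returned.
    for result in client.api.search('busybox', limit=5):
        print(result)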
diff --git a/docker/api/network.py b/docker/api/network.py
index 139c2d1..e95c5fc 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -4,7 +4,7 @@ from ..utils import version_lt
from .. import utils
-class NetworkApiMixin(object):
+class NetworkApiMixin:
def networks(self, names=None, ids=None, filters=None):
"""
List networks. Similar to the ``docker network ls`` command.
@@ -75,7 +75,7 @@ class NetworkApiMixin(object):
Example:
A network using the bridge driver:
- >>> client.create_network("network1", driver="bridge")
+ >>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
@@ -90,7 +90,7 @@ class NetworkApiMixin(object):
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
- >>> docker_client.create_network("network1", driver="bridge",
+ >>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
index f6c0b13..57110f1 100644
--- a/docker/api/plugin.py
+++ b/docker/api/plugin.py
@@ -1,9 +1,7 @@
-import six
-
from .. import auth, utils
-class PluginApiMixin(object):
+class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
@@ -21,7 +19,7 @@ class PluginApiMixin(object):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
- data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
diff --git a/docker/api/secret.py b/docker/api/secret.py
index e57952b..cd440b9 100644
--- a/docker/api/secret.py
+++ b/docker/api/secret.py
@@ -1,12 +1,10 @@
import base64
-import six
-
from .. import errors
from .. import utils
-class SecretApiMixin(object):
+class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
@@ -25,8 +23,7 @@ class SecretApiMixin(object):
data = data.encode('utf-8')
data = base64.b64encode(data)
- if six.PY3:
- data = data.decode('ascii')
+ data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
diff --git a/docker/api/service.py b/docker/api/service.py
index e9027bf..371f541 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -45,7 +45,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
- raise_version_error('force_update', '1.25')
+ raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
@@ -113,7 +113,7 @@ def _merge_task_template(current, override):
return merged
-class ServiceApiMixin(object):
+class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 897f08e..db40fdd 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,5 +1,5 @@
import logging
-from six.moves import http_client
+import http.client as http_client
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
@@ -8,7 +8,7 @@ from .. import utils
log = logging.getLogger(__name__)
-class SwarmApiMixin(object):
+class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
@@ -58,10 +58,10 @@ class SwarmApiMixin(object):
Example:
- >>> spec = client.create_swarm_spec(
+ >>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
- >>> client.init_swarm(
+ >>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
@@ -354,8 +354,8 @@ class SwarmApiMixin(object):
Example:
- >>> key = client.get_unlock_key()
- >>> client.unlock_node(key)
+ >>> key = client.api.get_unlock_key()
+ >>> client.unlock_swarm(key)
"""
if isinstance(key, dict):
@@ -396,7 +396,7 @@ class SwarmApiMixin(object):
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
- >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""
diff --git a/docker/api/volume.py b/docker/api/volume.py
index 900a608..86b0018 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -2,7 +2,7 @@ from .. import errors
from .. import utils
-class VolumeApiMixin(object):
+class VolumeApiMixin:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
@@ -21,7 +21,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.volumes()
+ >>> client.api.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
@@ -56,7 +56,7 @@ class VolumeApiMixin(object):
Example:
- >>> volume = cli.create_volume(name='foobar', driver='local',
+ >>> volume = client.api.create_volume(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
>>> print(volume)
@@ -104,7 +104,7 @@ class VolumeApiMixin(object):
Example:
- >>> cli.inspect_volume('foobar')
+ >>> client.api.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
diff --git a/docker/auth.py b/docker/auth.py
index 6a07ea2..4fa798f 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -2,14 +2,12 @@ import base64
import json
import logging
-import six
-
from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
@@ -18,13 +16,13 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
- 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
- 'Invalid index name ({0}). Cannot begin or end with a'
+ 'Invalid index name ({}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
@@ -98,10 +96,10 @@ class AuthConfig(dict):
"""
conf = {}
- for registry, entry in six.iteritems(entries):
+ for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug(
- 'Config entry for key {0} is not auth config'.format(
+ 'Config entry for key {} is not auth config'.format(
registry
)
)
@@ -111,14 +109,14 @@ class AuthConfig(dict):
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
- 'Invalid configuration for registry {0}'.format(
+ 'Invalid configuration for registry {}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
- 'Found an IdentityToken entry for registry {0}'.format(
+ 'Found an IdentityToken entry for registry {}'.format(
registry
)
)
@@ -132,7 +130,7 @@ class AuthConfig(dict):
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
- 'Auth data for {0} is absent. Client might be using a '
+ 'Auth data for {} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
@@ -140,7 +138,7 @@ class AuthConfig(dict):
username, password = decode_auth(entry['auth'])
log.debug(
- 'Found entry (registry={0}, username={1})'
+ 'Found entry (registry={}, username={})'
.format(repr(registry), repr(username))
)
@@ -170,7 +168,7 @@ class AuthConfig(dict):
try:
with open(config_file) as f:
config_dict = json.load(f)
- except (IOError, KeyError, ValueError) as e:
+ except (OSError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
@@ -230,7 +228,7 @@ class AuthConfig(dict):
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug(
- 'Using credentials store "{0}"'.format(store_name)
+ f'Using credentials store "{store_name}"'
)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
@@ -239,15 +237,15 @@ class AuthConfig(dict):
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ log.debug(f"Looking for auth entry for {repr(registry)}")
if registry in self.auths:
- log.debug("Found {0}".format(repr(registry)))
+ log.debug(f"Found {repr(registry)}")
return self.auths[registry]
- for key, conf in six.iteritems(self.auths):
+ for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
- log.debug("Found {0}".format(repr(key)))
+ log.debug(f"Found {repr(key)}")
return conf
log.debug("No entry found")
@@ -258,7 +256,7 @@ class AuthConfig(dict):
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
- log.debug("Looking for auth entry for {0}".format(repr(registry)))
+ log.debug(f"Looking for auth entry for {repr(registry)}")
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
@@ -278,7 +276,7 @@ class AuthConfig(dict):
return None
except credentials.StoreError as e:
raise errors.DockerException(
- 'Credentials store error: {0}'.format(repr(e))
+ f'Credentials store error: {repr(e)}'
)
def _get_store_instance(self, name):
@@ -329,7 +327,7 @@ def convert_to_hostname(url):
def decode_auth(auth):
- if isinstance(auth, six.string_types):
+ if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
diff --git a/docker/client.py b/docker/client.py
index 5add5d7..4dbd846 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
-class DockerClient(object):
+class DockerClient:
"""
A client for communicating with a Docker server.
@@ -212,7 +212,7 @@ class DockerClient(object):
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
- s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ s = [f"'DockerClient' object has no attribute '{name}'"]
# If a user calls a method on APIClient, they
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
diff --git a/docker/constants.py b/docker/constants.py
index 43fce61..d5bfc35 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,7 +1,7 @@
import sys
from .version import version
-DEFAULT_DOCKER_API_VERSION = '1.39'
+DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@@ -28,7 +28,7 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
-DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
+DEFAULT_USER_AGENT = f"docker-sdk-python/{version}"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
diff --git a/docker/context/api.py b/docker/context/api.py
index c45115b..380e8c4 100644
--- a/docker/context/api.py
+++ b/docker/context/api.py
@@ -9,7 +9,7 @@ from docker.context.config import write_context_name_to_docker_config
from docker.context import Context
-class ContextAPI(object):
+class ContextAPI:
"""Context API.
Contains methods for context management:
create, list, remove, get, inspect.
@@ -109,7 +109,7 @@ class ContextAPI(object):
if filename == METAFILE:
try:
data = json.load(
- open(os.path.join(dirname, filename), "r"))
+ open(os.path.join(dirname, filename)))
names.append(data["Name"])
except Exception as e:
raise errors.ContextException(
@@ -138,7 +138,7 @@ class ContextAPI(object):
err = write_context_name_to_docker_config(name)
if err:
raise errors.ContextException(
- 'Failed to set current context: {}'.format(err))
+ f'Failed to set current context: {err}')
@classmethod
def remove_context(cls, name):
diff --git a/docker/context/config.py b/docker/context/config.py
index baf54f7..d761aef 100644
--- a/docker/context/config.py
+++ b/docker/context/config.py
@@ -15,7 +15,7 @@ def get_current_context_name():
docker_cfg_path = find_config_file()
if docker_cfg_path:
try:
- with open(docker_cfg_path, "r") as f:
+ with open(docker_cfg_path) as f:
name = json.load(f).get("currentContext", "default")
except Exception:
return "default"
@@ -29,7 +29,7 @@ def write_context_name_to_docker_config(name=None):
config = {}
if docker_cfg_path:
try:
- with open(docker_cfg_path, "r") as f:
+ with open(docker_cfg_path) as f:
config = json.load(f)
except Exception as e:
return e
diff --git a/docker/context/context.py b/docker/context/context.py
index 2413b2e..dbaa01c 100644
--- a/docker/context/context.py
+++ b/docker/context/context.py
@@ -11,6 +11,7 @@ from docker.context.config import get_context_host
class Context:
"""A context."""
+
def __init__(self, name, orchestrator=None, host=None, endpoints=None,
tls=False):
if not name:
@@ -93,7 +94,7 @@ class Context:
try:
with open(meta_file) as f:
metadata = json.load(f)
- except (IOError, KeyError, ValueError) as e:
+ except (OSError, KeyError, ValueError) as e:
# unknown format
raise Exception("""Detected corrupted meta file for
context {} : {}""".format(name, e))
@@ -127,8 +128,12 @@ class Context:
elif filename.startswith("key"):
key = os.path.join(tls_dir, endpoint, filename)
if all([ca_cert, cert, key]):
+ verify = None
+ if endpoint == "docker" and not self.endpoints["docker"].get(
+ "SkipTLSVerify", False):
+ verify = True
certs[endpoint] = TLSConfig(
- client_cert=(cert, key), ca_cert=ca_cert)
+ client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
self.tls_cfg = certs
self.tls_path = tls_dir
@@ -166,7 +171,7 @@ class Context:
rmtree(self.tls_path)
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
def __str__(self):
return json.dumps(self.__call__(), indent=2)
diff --git a/docker/credentials/store.py b/docker/credentials/store.py
index 0017888..e55976f 100644
--- a/docker/credentials/store.py
+++ b/docker/credentials/store.py
@@ -2,15 +2,13 @@ import errno
import json
import subprocess
-import six
-
from . import constants
from . import errors
from .utils import create_environment_dict
from .utils import find_executable
-class Store(object):
+class Store:
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
@@ -30,7 +28,7 @@ class Store(object):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
- if not isinstance(server, six.binary_type):
+ if not isinstance(server, bytes):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
@@ -41,7 +39,7 @@ class Store(object):
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
- 'No matching credentials in {}'.format(self.program)
+ f'No matching credentials in {self.program}'
)
return result
@@ -61,7 +59,7 @@ class Store(object):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
- if not isinstance(server, six.binary_type):
+ if not isinstance(server, bytes):
server = server.encode('utf-8')
self._execute('erase', server)
@@ -75,20 +73,9 @@ class Store(object):
output = None
env = create_environment_dict(self.environment)
try:
- if six.PY3:
- output = subprocess.check_output(
- [self.exe, subcmd], input=data_input, env=env,
- )
- else:
- process = subprocess.Popen(
- [self.exe, subcmd], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, env=env,
- )
- output, _ = process.communicate(data_input)
- if process.returncode != 0:
- raise subprocess.CalledProcessError(
- returncode=process.returncode, cmd='', output=output
- )
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program)
except OSError as e:
diff --git a/docker/errors.py b/docker/errors.py
index ab30a29..ba95256 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -38,25 +38,25 @@ class APIError(requests.exceptions.HTTPError, DockerException):
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
- super(APIError, self).__init__(message)
+ super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
- message = super(APIError, self).__str__()
+ message = super().__str__()
if self.is_client_error():
- message = '{0} Client Error for {1}: {2}'.format(
+ message = '{} Client Error for {}: {}'.format(
self.response.status_code, self.response.url,
self.response.reason)
elif self.is_server_error():
- message = '{0} Server Error for {1}: {2}'.format(
+ message = '{} Server Error for {}: {}'.format(
self.response.status_code, self.response.url,
self.response.reason)
if self.explanation:
- message = '{0} ("{1}")'.format(message, self.explanation)
+ message = f'{message} ("{self.explanation}")'
return message
@@ -133,11 +133,11 @@ class ContainerError(DockerException):
self.image = image
self.stderr = stderr
- err = ": {}".format(stderr) if stderr is not None else ""
+ err = f": {stderr}" if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
- super(ContainerError, self).__init__(msg)
+ super().__init__(msg)
class StreamParseError(RuntimeError):
@@ -147,7 +147,7 @@ class StreamParseError(RuntimeError):
class BuildError(DockerException):
def __init__(self, reason, build_log):
- super(BuildError, self).__init__(reason)
+ super().__init__(reason)
self.msg = reason
self.build_log = build_log
@@ -157,8 +157,8 @@ class ImageLoadError(DockerException):
def create_unexpected_kwargs_error(name, kwargs):
- quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
- text = ["{}() ".format(name)]
+ quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
+ text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
@@ -172,7 +172,7 @@ class MissingContextParameter(DockerException):
self.param = param
def __str__(self):
- return ("missing parameter: {}".format(self.param))
+ return (f"missing parameter: {self.param}")
class ContextAlreadyExists(DockerException):
@@ -180,7 +180,7 @@ class ContextAlreadyExists(DockerException):
self.name = name
def __str__(self):
- return ("context {} already exists".format(self.name))
+ return (f"context {self.name} already exists")
class ContextException(DockerException):
@@ -196,4 +196,4 @@ class ContextNotFound(DockerException):
self.name = name
def __str__(self):
- return ("context '{}' not found".format(self.name))
+ return (f"context '{self.name}' not found")
diff --git a/docker/models/configs.py b/docker/models/configs.py
index 7f23f65..3588c8b 100644
--- a/docker/models/configs.py
+++ b/docker/models/configs.py
@@ -7,7 +7,7 @@ class Config(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
diff --git a/docker/models/containers.py b/docker/models/containers.py
index 36cbbc4..957deed 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -761,6 +761,14 @@ class ContainerCollection(Collection):
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+ Or a list of strings which each one of its elements specifies a mount volume.
+
+ For example:
+
+ .. code-block:: python
+
+ ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
+
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
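
Note: a minimal sketch of the list-of-strings volume form documented above; an editor-added illustration with made-up host paths:

    import docker

    client = docker.from_env()

    # Each string follows the host_path:container_path[:mode] convention.
    output = client.containers.run(
        'busybox', 'ls /mnt/vol1',
        volumes=['/home/user1/:/mnt/vol2', '/var/www:/mnt/vol1:ro'],
        remove=True,
    )
    print(output)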
diff --git a/docker/models/images.py b/docker/models/images.py
index e635588..46f8efe 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -2,8 +2,6 @@ import itertools
import re
import warnings
-import six
-
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import BuildError, ImageLoadError, InvalidArgument
@@ -17,7 +15,7 @@ class Image(Model):
An image on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+ return "<{}: '{}'>".format(self.__class__.__name__, "', '".join(self.tags))
@property
def labels(self):
@@ -84,19 +82,19 @@ class Image(Model):
Example:
- >>> image = cli.get_image("busybox:latest")
+ >>> image = cli.images.get("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
- >>> for chunk in image:
+ >>> for chunk in image.save():
>>> f.write(chunk)
>>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
- if isinstance(named, six.string_types):
+ if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(
- "{} is not a valid tag for this image".format(named)
+ f"{named} is not a valid tag for this image"
)
img = named
@@ -127,7 +125,7 @@ class RegistryData(Model):
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
- super(RegistryData, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.image_name = image_name
@property
@@ -180,7 +178,7 @@ class RegistryData(Model):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
- '"{0}" is not a valid platform descriptor'.format(platform)
+ f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
@@ -277,7 +275,7 @@ class ImageCollection(Collection):
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
- if isinstance(resp, six.string_types):
+ if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
index ae5851c..37ecefb 100644
--- a/docker/models/plugins.py
+++ b/docker/models/plugins.py
@@ -7,7 +7,7 @@ class Plugin(Model):
A plugin on the server.
"""
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -117,8 +117,7 @@ class Plugin(Model):
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
- for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
- yield d
+ yield from self.client.api.upgrade_plugin(self.name, remote, privileges)
self.reload()
diff --git a/docker/models/resource.py b/docker/models/resource.py
index ed3900a..dec2349 100644
--- a/docker/models/resource.py
+++ b/docker/models/resource.py
@@ -1,5 +1,4 @@
-
-class Model(object):
+class Model:
"""
A base class for representing a single object on the server.
"""
@@ -18,13 +17,13 @@ class Model(object):
self.attrs = {}
def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+ return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
- return hash("%s:%s" % (self.__class__.__name__, self.id))
+ return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
@@ -49,7 +48,7 @@ class Model(object):
self.attrs = new_model.attrs
-class Collection(object):
+class Collection:
"""
A base class for representing all objects of a particular type on the
server.
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
index ca11ede..da01d44 100644
--- a/docker/models/secrets.py
+++ b/docker/models/secrets.py
@@ -7,7 +7,7 @@ class Secret(Model):
id_attribute = 'ID'
def __repr__(self):
- return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+ return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
@@ -30,6 +30,7 @@ class SecretCollection(Collection):
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
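
Note: with the change above, the model returned by ``SecretCollection.create`` has its name populated immediately. A hedged sketch; the secret name and data are illustrative:

    import docker

    client = docker.from_env()

    secret = client.secrets.create(name='db_password', data=b's3cr3t')
    print(secret.name)  # 'db_password', taken from the create() kwargs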
diff --git a/docker/models/services.py b/docker/models/services.py
index a29ff13..200dd33 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -213,6 +213,10 @@ class ServiceCollection(Collection):
to the service.
privileges (Privileges): Security options for the service's
containers.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to
+ the default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop
+ from the default set for the container.
Returns:
:py:class:`Service`: The created service.
@@ -277,6 +281,8 @@ class ServiceCollection(Collection):
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
+ 'cap_add',
+ 'cap_drop',
'command',
'configs',
'dns_config',
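
Note: a hedged sketch of the new ``cap_add`` / ``cap_drop`` keyword arguments, which are now copied into the service's ContainerSpec; the image and capability names are illustrative, not from the patch:

    import docker

    client = docker.from_env()

    service = client.services.create(
        'nginx:alpine',
        name='web',
        cap_add=['CAP_NET_ADMIN'],
        cap_drop=['CAP_CHOWN'],
    )
    print(service.id)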
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index 755c17d..b0b1a2e 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -11,7 +11,7 @@ class Swarm(Model):
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
- super(Swarm, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()
diff --git a/docker/tls.py b/docker/tls.py
index 1b297ab..067d556 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -5,7 +5,7 @@ from . import errors
from .transport import SSLHTTPAdapter
-class TLSConfig(object):
+class TLSConfig:
"""
TLS configuration.
diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py
index 4d819b6..dfbb193 100644
--- a/docker/transport/basehttpadapter.py
+++ b/docker/transport/basehttpadapter.py
@@ -3,6 +3,6 @@ import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
- super(BaseHTTPAdapter, self).close()
+ super().close()
if hasattr(self, 'pools'):
self.pools.clear()
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
index 70d8519..df67f21 100644
--- a/docker/transport/npipeconn.py
+++ b/docker/transport/npipeconn.py
@@ -1,14 +1,11 @@
-import six
+import queue
import requests.adapters
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
+import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@@ -18,9 +15,9 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class NpipeHTTPConnection(httplib.HTTPConnection, object):
+class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
- super(NpipeHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
@@ -35,7 +32,7 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object):
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
- super(NpipeHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
@@ -57,7 +54,7 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
- except six.moves.queue.Empty:
+ except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
@@ -85,7 +82,7 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(NpipeHTTPAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
index 176b5c8..766372a 100644
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -2,7 +2,6 @@ import functools
import time
import io
-import six
import win32file
import win32pipe
@@ -24,7 +23,7 @@ def check_closed(f):
return wrapped
-class NpipeSocket(object):
+class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
@@ -128,9 +127,6 @@ class NpipeSocket(object):
@check_closed
def recv_into(self, buf, nbytes=0):
- if six.PY2:
- return self._recv_into_py2(buf, nbytes)
-
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
@@ -195,7 +191,7 @@ class NpipeFileIOBase(io.RawIOBase):
self.sock = npipe_socket
def close(self):
- super(NpipeFileIOBase, self).close()
+ super().close()
self.sock = None
def fileno(self):
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py
index a761ef5..8e6beb2 100644
--- a/docker/transport/sshconn.py
+++ b/docker/transport/sshconn.py
@@ -1,6 +1,7 @@
import paramiko
+import queue
+import urllib.parse
import requests.adapters
-import six
import logging
import os
import signal
@@ -10,10 +11,7 @@ import subprocess
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
+import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
@@ -25,7 +23,7 @@ RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host):
- super(SSHSocket, self).__init__(
+ super().__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
@@ -53,7 +51,7 @@ class SSHSocket(socket.socket):
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
- env = dict(os.environ)
+ env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
@@ -65,7 +63,7 @@ class SSHSocket(socket.socket):
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
- preexec_fn=preexec_func)
+ preexec_fn=None if constants.IS_WINDOWS_PLATFORM else preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
@@ -90,8 +88,7 @@ class SSHSocket(socket.socket):
def makefile(self, mode):
if not self.proc:
self.connect()
- if six.PY3:
- self.proc.stdout.channel = self
+ self.proc.stdout.channel = self
return self.proc.stdout
@@ -103,9 +100,9 @@ class SSHSocket(socket.socket):
self.proc.terminate()
-class SSHConnection(httplib.HTTPConnection, object):
+class SSHConnection(httplib.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
- super(SSHConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
@@ -129,7 +126,7 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
- super(SSHConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = None
@@ -152,7 +149,7 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
- except six.moves.queue.Empty:
+ except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
@@ -188,12 +185,12 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(SSHHTTPAdapter, self).__init__()
+ super().__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
- base_url = six.moves.urllib_parse.urlparse(base_url)
+ base_url = urllib.parse.urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
@@ -205,7 +202,6 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
- self.ssh_conf = host_config
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
self.ssh_conf['proxycommand']
@@ -213,9 +209,11 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
- self.ssh_params['port'] = self.ssh_conf['port']
+ self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
- self.ssh_params['username'] = self.ssh_conf['user']
+ self.ssh_params['username'] = host_config['user']
+ if 'identityfile' in host_config:
+ self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())
@@ -252,6 +250,6 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
return pool
def close(self):
- super(SSHHTTPAdapter, self).close()
+ super().close()
if self.ssh_client:
self.ssh_client.close()
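
Note: with the ssh_config handling above, a matching Host entry's HostName, Port, User and IdentityFile are now applied when connecting over SSH. A hedged sketch; the host alias is illustrative and assumes a suitable ~/.ssh/config entry:

    import docker

    # 'docker-host' is an alias resolved via ~/.ssh/config.
    client = docker.DockerClient(base_url='ssh://docker-host')
    print(client.version()['ApiVersion'])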
diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py
index 12de76c..31e3014 100644
--- a/docker/transport/ssladapter.py
+++ b/docker/transport/ssladapter.py
@@ -36,7 +36,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
- super(SSLHTTPAdapter, self).__init__(**kwargs)
+ super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
@@ -59,7 +59,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter):
But we still need to take care of when there is a proxy poolmanager
"""
- conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
+ conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index 3e040c5..1b00762 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -1,7 +1,6 @@
-import six
import requests.adapters
import socket
-from six.moves import http_client as httplib
+import http.client as httplib
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
@@ -15,27 +14,15 @@ except ImportError:
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-class UnixHTTPResponse(httplib.HTTPResponse, object):
- def __init__(self, sock, *args, **kwargs):
- disable_buffering = kwargs.pop('disable_buffering', False)
- if six.PY2:
- # FIXME: We may need to disable buffering on Py3 as well,
- # but there's no clear way to do it at the moment. See:
- # https://github.com/docker/docker-py/issues/1799
- kwargs['buffering'] = not disable_buffering
- super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
-
-
-class UnixHTTPConnection(httplib.HTTPConnection, object):
+class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
- super(UnixHTTPConnection, self).__init__(
+ super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
- self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -44,20 +31,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.sock = sock
def putheader(self, header, *values):
- super(UnixHTTPConnection, self).putheader(header, *values)
- if header == 'Connection' and 'Upgrade' in values:
- self.disable_buffering = True
+ super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
- if self.disable_buffering:
- kwargs['disable_buffering'] = True
-
- return UnixHTTPResponse(sock, *args, **kwargs)
+ return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
- super(UnixHTTPConnectionPool, self).__init__(
+ super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
@@ -89,7 +71,7 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
- super(UnixHTTPAdapter, self).__init__()
+ super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
diff --git a/docker/types/base.py b/docker/types/base.py
index 6891062..8851f1e 100644
--- a/docker/types/base.py
+++ b/docker/types/base.py
@@ -1,7 +1,4 @@
-import six
-
-
class DictType(dict):
def __init__(self, init):
- for k, v in six.iteritems(init):
+ for k, v in init.items():
self[k] = v
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 9fa4656..f1b60b2 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
@@ -10,7 +8,7 @@ from .base import DictType
from .healthcheck import Healthcheck
-class LogConfigTypesEnum(object):
+class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
@@ -61,7 +59,7 @@ class LogConfig(DictType):
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
- super(LogConfig, self).__init__({
+ super().__init__({
'Type': log_driver_type,
'Config': config
})
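As a usage sketch outside the diff (names follow the docker-py docs), LogConfig still pairs a driver type with a config dict:

from docker.types import LogConfig

lc = LogConfig(type=LogConfig.types.JSON, config={
    'max-size': '1g',
    'labels': 'production_status,geo',
})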
@@ -117,13 +115,13 @@ class Ulimit(DictType):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
- if not isinstance(name, six.string_types):
+ if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
- super(Ulimit, self).__init__({
+ super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
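For illustration only, a minimal Ulimit matching the checks above (name must be a str, soft/hard must be ints):

from docker.types import Ulimit

nofile = Ulimit(name='nofile', soft=1024, hard=2048)
# nofile == {'Name': 'nofile', 'Soft': 1024, 'Hard': 2048}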
@@ -184,7 +182,7 @@ class DeviceRequest(DictType):
if driver is None:
driver = ''
- elif not isinstance(driver, six.string_types):
+ elif not isinstance(driver, str):
raise ValueError('DeviceRequest.driver must be a string')
if count is None:
count = 0
@@ -203,7 +201,7 @@ class DeviceRequest(DictType):
elif not isinstance(options, dict):
raise ValueError('DeviceRequest.options must be a dict')
- super(DeviceRequest, self).__init__({
+ super().__init__({
'Driver': driver,
'Count': count,
'DeviceIDs': device_ids,
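A hedged DeviceRequest sketch; requesting every available GPU is the usual case in the docker-py docs, and the driver defaults to '' as handled above:

from docker.types import DeviceRequest

gpus = DeviceRequest(count=-1, capabilities=[['gpu']])  # count=-1 requests all matching devices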
@@ -297,7 +295,7 @@ class HostConfig(dict):
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
- if isinstance(shm_size, six.string_types):
+ if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
@@ -358,7 +356,7 @@ class HostConfig(dict):
self['Devices'] = parse_devices(devices)
if group_add:
- self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+ self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
@@ -378,11 +376,11 @@ class HostConfig(dict):
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- self['Sysctls'][k] = six.text_type(v)
+ for k, v in sysctls.items():
+ self['Sysctls'][k] = str(v)
if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
+ if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
@@ -404,7 +402,7 @@ class HostConfig(dict):
if isinstance(lxc_conf, dict):
formatted = []
- for k, v in six.iteritems(lxc_conf):
+ for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
@@ -559,7 +557,7 @@ class HostConfig(dict):
self["PidsLimit"] = pids_limit
if isolation:
- if not isinstance(isolation, six.string_types):
+ if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
@@ -609,7 +607,7 @@ class HostConfig(dict):
self['CpuPercent'] = cpu_percent
if nano_cpus:
- if not isinstance(nano_cpus, six.integer_types):
+ if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
@@ -699,17 +697,17 @@ class ContainerConfig(dict):
'version 1.29'
)
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
- if isinstance(entrypoint, six.string_types):
+ if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
+ labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
@@ -720,10 +718,10 @@ class ContainerConfig(dict):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
- if isinstance(volumes, six.string_types):
+ if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
@@ -752,7 +750,7 @@ class ContainerConfig(dict):
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
- 'User': six.text_type(user) if user is not None else None,
+ 'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
index af3e5bc..10e8101 100644
--- a/docker/types/daemon.py
+++ b/docker/types/daemon.py
@@ -8,7 +8,7 @@ except ImportError:
from ..errors import DockerException
-class CancellableStream(object):
+class CancellableStream:
"""
Stream wrapper for real-time events, logs, etc. from the server.
@@ -32,7 +32,7 @@ class CancellableStream(object):
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration
- except socket.error:
+ except OSError:
raise StopIteration
next = __next__
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
index 9815018..dfc88a9 100644
--- a/docker/types/healthcheck.py
+++ b/docker/types/healthcheck.py
@@ -1,7 +1,5 @@
from .base import DictType
-import six
-
class Healthcheck(DictType):
"""
@@ -31,7 +29,7 @@ class Healthcheck(DictType):
"""
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
- if isinstance(test, six.string_types):
+ if isinstance(test, str):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
@@ -39,7 +37,7 @@ class Healthcheck(DictType):
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
- super(Healthcheck, self).__init__({
+ super().__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
@@ -53,7 +51,7 @@ class Healthcheck(DictType):
@test.setter
def test(self, value):
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
value = ["CMD-SHELL", value]
self['Test'] = value
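As a usage sketch with assumed values: a plain-string test is wrapped into ['CMD-SHELL', ...], and the Docker API expects durations in nanoseconds:

from docker.types import Healthcheck

hc = Healthcheck(
    test='curl -f http://localhost/ || exit 1',  # stored as ['CMD-SHELL', 'curl -f ...']
    interval=30 * 1_000_000_000,                 # 30s, in nanoseconds
    timeout=10 * 1_000_000_000,
    retries=3,
)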
diff --git a/docker/types/services.py b/docker/types/services.py
index 29498e9..fe7cc26 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,5 +1,3 @@
-import six
-
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import (
@@ -112,16 +110,21 @@ class ContainerSpec(dict):
containers. Only used for Windows containers.
init (boolean): Run an init inside the container that forwards signals
and reaps processes.
+ cap_add (:py:class:`list`): A list of kernel capabilities to add to the
+ default set for the container.
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
+ the default set for the container.
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None, groups=None,
open_stdin=None, read_only=None, stop_signal=None,
healthcheck=None, hosts=None, dns_config=None, configs=None,
- privileges=None, isolation=None, init=None):
+ privileges=None, isolation=None, init=None, cap_add=None,
+ cap_drop=None):
self['Image'] = image
- if isinstance(command, six.string_types):
+ if isinstance(command, str):
command = split_command(command)
self['Command'] = command
self['Args'] = args
@@ -151,7 +154,7 @@ class ContainerSpec(dict):
if mounts is not None:
parsed_mounts = []
for mount in mounts:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
@@ -188,6 +191,18 @@ class ContainerSpec(dict):
if init is not None:
self['Init'] = init
+ if cap_add is not None:
+ if not isinstance(cap_add, list):
+ raise TypeError('cap_add must be a list')
+
+ self['CapabilityAdd'] = cap_add
+
+ if cap_drop is not None:
+ if not isinstance(cap_drop, list):
+ raise TypeError('cap_drop must be a list')
+
+ self['CapabilityDrop'] = cap_drop
+
class Mount(dict):
"""
@@ -224,7 +239,7 @@ class Mount(dict):
self['Source'] = source
if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
raise errors.InvalidArgument(
- 'Unsupported mount type: "{}"'.format(type)
+ f'Unsupported mount type: "{type}"'
)
self['Type'] = type
self['ReadOnly'] = read_only
@@ -260,7 +275,7 @@ class Mount(dict):
elif type == 'tmpfs':
tmpfs_opts = {}
if tmpfs_mode:
- if not isinstance(tmpfs_mode, six.integer_types):
+ if not isinstance(tmpfs_mode, int):
raise errors.InvalidArgument(
'tmpfs_mode must be an integer'
)
@@ -280,7 +295,7 @@ class Mount(dict):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
- 'Invalid mount format "{0}"'.format(string)
+ f'Invalid mount format "{string}"'
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
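A hedged sketch of the mount-string forms this parser accepts (at most three ':'-separated fields):

from docker.types import Mount

Mount.parse_mount_string('/data')               # target only, source=None
Mount.parse_mount_string('/srv/data:/data:ro')  # source:target:mode
# 'a:b:c:d' raises errors.InvalidArgument: more than three fields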
@@ -347,7 +362,7 @@ def _convert_generic_resources_dict(generic_resources):
' (found {})'.format(type(generic_resources))
)
resources = []
- for kind, value in six.iteritems(generic_resources):
+ for kind, value in generic_resources.items():
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
@@ -443,7 +458,7 @@ class RollbackConfig(UpdateConfig):
pass
-class RestartConditionTypesEnum(object):
+class RestartConditionTypesEnum:
_values = (
'none',
'on-failure',
@@ -474,7 +489,7 @@ class RestartPolicy(dict):
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
- 'Invalid RestartPolicy condition {0}'.format(condition)
+ f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
@@ -533,7 +548,7 @@ def convert_service_ports(ports):
)
result = []
- for k, v in six.iteritems(ports):
+ for k, v in ports.items():
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
diff --git a/docker/utils/build.py b/docker/utils/build.py
index 5787cab..ac06043 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -4,8 +4,6 @@ import re
import tarfile
import tempfile
-import six
-
from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
@@ -69,7 +67,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
- extra_names = set(e[0] for e in extra_files)
+ extra_names = {e[0] for e in extra_files}
for path in files:
if path in extra_names:
# Extra files override context files with the same name
@@ -95,9 +93,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
- except IOError:
- raise IOError(
- 'Can not read file in context: {}'.format(full_path)
+ except OSError:
+ raise OSError(
+ f'Can not read file in context: {full_path}'
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
@@ -119,12 +117,8 @@ def mkbuildcontext(dockerfile):
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
- if six.PY3:
- raise TypeError('Please use io.BytesIO to create in-memory '
- 'Dockerfiles with Python 3')
- else:
- dfinfo.size = len(dockerfile.getvalue())
- dockerfile.seek(0)
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
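A minimal sketch of the BytesIO-only behavior, assuming mkbuildcontext keeps its one-argument signature from this file:

import io
from docker.utils.build import mkbuildcontext

dockerfile = io.BytesIO(b'FROM busybox\nCMD ["echo", "hello"]\n')
context = mkbuildcontext(dockerfile)  # passing io.StringIO now raises TypeError unconditionally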
@@ -154,7 +148,7 @@ def walk(root, patterns, default=True):
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
-class PatternMatcher(object):
+class PatternMatcher:
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
@@ -212,13 +206,12 @@ class PatternMatcher(object):
break
if skip:
continue
- for sub in rec_walk(cur):
- yield sub
+ yield from rec_walk(cur)
return rec_walk(root)
-class Pattern(object):
+class Pattern:
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
diff --git a/docker/utils/config.py b/docker/utils/config.py
index 82a0e2a..8e24959 100644
--- a/docker/utils/config.py
+++ b/docker/utils/config.py
@@ -18,11 +18,11 @@ def find_config_file(config_path=None):
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
- log.debug("Trying paths: {0}".format(repr(paths)))
+ log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
- log.debug("Found file at path: {0}".format(path))
+ log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
@@ -57,7 +57,7 @@ def load_general_config(config_path=None):
try:
with open(config_file) as f:
return json.load(f)
- except (IOError, ValueError) as e:
+ except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index c975d4b..cf1baf4 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -27,7 +27,7 @@ def minimum_version(version):
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
- '{0} is not available for version < {1}'.format(
+ '{} is not available for version < {}'.format(
f.__name__, version
)
)
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
index cc940a2..90e9f60 100644
--- a/docker/utils/fnmatch.py
+++ b/docker/utils/fnmatch.py
@@ -108,7 +108,7 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
+ res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
index addffdf..f384175 100644
--- a/docker/utils/json_stream.py
+++ b/docker/utils/json_stream.py
@@ -1,11 +1,6 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
import json
import json.decoder
-import six
-
from ..errors import StreamParseError
@@ -20,7 +15,7 @@ def stream_as_text(stream):
instead of byte streams.
"""
for data in stream:
- if not isinstance(data, six.text_type):
+ if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
@@ -46,8 +41,8 @@ def json_stream(stream):
return split_buffer(stream, json_splitter, json_decoder.decode)
-def line_splitter(buffer, separator=u'\n'):
- index = buffer.find(six.text_type(separator))
+def line_splitter(buffer, separator='\n'):
+ index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
@@ -61,7 +56,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
of the input.
"""
splitter = splitter or line_splitter
- buffered = six.text_type('')
+ buffered = ''
for data in stream_as_text(stream):
buffered += data
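A quick hedged sketch of the splitter helper above (import path taken from the file being patched):

from docker.utils.json_stream import line_splitter

line_splitter('{"id": 1}\n{"id": 2}\n')
# -> ('{"id": 1}\n', '{"id": 2}\n'); returns None when no separator is present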
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index a50cc02..e813936 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -3,7 +3,7 @@ import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
- r"((?P<host>[a-fA-F\d.:]+):)?" # Address
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
@@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
if not end:
return [start + proto]
if randomly_available_port:
- return ['{}-{}'.format(start, end) + proto]
+ return [f'{start}-{end}' + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 7ba9505..4a2076e 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -4,8 +4,6 @@ import select
import socket as pysocket
import struct
-import six
-
try:
from ..transport import NpipeSocket
except ImportError:
@@ -27,16 +25,16 @@ def read(socket, n=4096):
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
- if six.PY3 and not isinstance(socket, NpipeSocket):
+ if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
- if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ if isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
- except EnvironmentError as e:
+ except OSError as e:
if e.errno not in recoverable_errors:
raise
@@ -46,7 +44,7 @@ def read_exactly(socket, n):
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
- data = six.binary_type()
+ data = bytes()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
@@ -134,7 +132,7 @@ def consume_socket_output(frames, demux=False):
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
- return six.binary_type().join(frames)
+ return bytes().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
@@ -166,4 +164,4 @@ def demux_adaptor(stream_id, data):
elif stream_id == STDERR:
return (None, data)
else:
- raise ValueError('{0} is not a valid stream'.format(stream_id))
+ raise ValueError(f'{stream_id} is not a valid stream')
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 1b195e2..f7c3dd7 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -7,8 +7,6 @@ import string
from datetime import datetime
from distutils.version import StrictVersion
-import six
-
from .. import errors
from .. import tls
from ..constants import DEFAULT_HTTP_HOST
@@ -16,11 +14,7 @@ from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
-if six.PY2:
- from urllib import splitnport
- from urlparse import urlparse
-else:
- from urllib.parse import splitnport, urlparse
+from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
@@ -39,8 +33,7 @@ def create_ipam_config(*args, **kwargs):
def decode_json_header(header):
data = base64.b64decode(header)
- if six.PY3:
- data = data.decode('utf-8')
+ data = data.decode('utf-8')
return json.loads(data)
@@ -80,7 +73,7 @@ def _convert_port_binding(binding):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
- elif isinstance(binding[0], six.string_types):
+ elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
@@ -104,7 +97,7 @@ def _convert_port_binding(binding):
def convert_port_bindings(port_bindings):
result = {}
- for k, v in six.iteritems(port_bindings):
+ for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
@@ -121,7 +114,7 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
- if isinstance(k, six.binary_type):
+ if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
@@ -132,7 +125,7 @@ def convert_volume_binds(binds):
)
bind = v['bind']
- if isinstance(bind, six.binary_type):
+ if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
@@ -143,13 +136,13 @@ def convert_volume_binds(binds):
mode = 'rw'
result.append(
- six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+ f'{k}:{bind}:{mode}'
)
else:
- if isinstance(v, six.binary_type):
+ if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
- six.text_type('{0}:{1}:rw').format(k, v)
+ f'{k}:{v}:rw'
)
return result
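For illustration (values assumed), the bind forms handled above normalize to 'source:target:mode' strings:

from docker.utils.utils import convert_volume_binds

convert_volume_binds({'/srv/data': {'bind': '/data', 'ro': True}})  # -> ['/srv/data:/data:ro']
convert_volume_binds({b'/srv/logs': '/logs'})  # bytes keys are decoded -> ['/srv/logs:/logs:rw']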
@@ -166,7 +159,7 @@ def convert_tmpfs_mounts(tmpfs):
result = {}
for mount in tmpfs:
- if isinstance(mount, six.string_types):
+ if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
@@ -191,7 +184,7 @@ def convert_service_networks(networks):
result = []
for n in networks:
- if isinstance(n, six.string_types):
+ if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
@@ -240,14 +233,14 @@ def parse_host(addr, is_win32=False, tls=False):
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
- "Invalid bind address protocol: {}".format(addr)
+ f"Invalid bind address protocol: {addr}"
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
- 'Invalid bind address format: {}'.format(addr)
+ f'Invalid bind address format: {addr}'
)
if any([
@@ -255,7 +248,7 @@ def parse_host(addr, is_win32=False, tls=False):
parsed_url.password
]):
raise errors.DockerException(
- 'Invalid bind address format: {}'.format(addr)
+ f'Invalid bind address format: {addr}'
)
if parsed_url.path and proto == 'ssh':
@@ -292,8 +285,8 @@ def parse_host(addr, is_win32=False, tls=False):
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
- return "{}://{}".format(proto, path).rstrip('/')
- return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
+ return f"{proto}://{path}".rstrip('/')
+ return f'{proto}://{host}:{port}{path}'.rstrip('/')
def parse_devices(devices):
@@ -302,9 +295,9 @@ def parse_devices(devices):
if isinstance(device, dict):
device_list.append(device)
continue
- if not isinstance(device, six.string_types):
+ if not isinstance(device, str):
raise errors.DockerException(
- 'Invalid device type {0}'.format(type(device))
+ f'Invalid device type {type(device)}'
)
device_mapping = device.split(':')
if device_mapping:
@@ -372,13 +365,13 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
def convert_filters(filters):
result = {}
- for k, v in six.iteritems(filters):
+ for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
- str(item) if not isinstance(item, six.string_types) else item
+ str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result)
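A hedged sketch of the filter encoding above: booleans become 'true'/'false', scalars are wrapped in lists, and the result is JSON-encoded:

from docker.utils.utils import convert_filters

convert_filters({'dangling': True, 'label': 'stage=prod'})
# -> '{"dangling": ["true"], "label": ["stage=prod"]}' (roughly; key order follows the input dict)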
@@ -391,7 +384,7 @@ def datetime_to_timestamp(dt):
def parse_bytes(s):
- if isinstance(s, six.integer_types + (float,)):
+ if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
@@ -415,7 +408,7 @@ def parse_bytes(s):
digits = float(digits_part)
except ValueError:
raise errors.DockerException(
- 'Failed converting the string value for memory ({0}) to'
+ 'Failed converting the string value for memory ({}) to'
' an integer.'.format(digits_part)
)
@@ -423,7 +416,7 @@ def parse_bytes(s):
s = int(digits * units[suffix])
else:
raise errors.DockerException(
- 'The specified value for memory ({0}) should specify the'
+ 'The specified value for memory ({}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
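For illustration, parse_bytes keeps accepting bare numbers and unit suffixes (assuming the 1024-based BYTE_UNITS table):

from docker.utils.utils import parse_bytes

parse_bytes('128m')  # -> 134217728
parse_bytes(512)     # ints and floats pass through unchanged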
@@ -433,9 +426,9 @@ def parse_bytes(s):
def normalize_links(links):
if isinstance(links, dict):
- links = six.iteritems(links)
+ links = iter(links.items())
- return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
+ return [f'{k}:{v}' if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
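A hedged sketch of normalize_links above: entries are sorted, and a bare name is kept when no alias is given:

from docker.utils.utils import normalize_links

normalize_links({'db': 'database', 'cache': None})  # -> ['cache', 'db:database']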
@@ -445,7 +438,7 @@ def parse_env_file(env_file):
"""
environment = {}
- with open(env_file, 'r') as f:
+ with open(env_file) as f:
for line in f:
if line[0] == '#':
@@ -461,15 +454,13 @@ def parse_env_file(env_file):
environment[k] = v
else:
raise errors.DockerException(
- 'Invalid line in environment file {0}:\n{1}'.format(
+ 'Invalid line in environment file {}:\n{}'.format(
env_file, line))
return environment
def split_command(command):
- if six.PY2 and not isinstance(command, six.binary_type):
- command = command.encode('utf-8')
return shlex.split(command)
@@ -477,22 +468,22 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
- if isinstance(value, six.binary_type):
+ if isinstance(value, bytes):
value = value.decode('utf-8')
- return u'{key}={value}'.format(key=key, value=value)
- return [format_env(*var) for var in six.iteritems(environment)]
+ return f'{key}={value}'
+ return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
- '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
]
return [
- '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+ f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
]
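For illustration (values assumed), the two formatters above produce the plain-list shapes the Engine and Swarm APIs expect:

from docker.utils.utils import format_environment, format_extra_hosts

format_environment({'TERM': 'xterm', 'EMPTY': None})  # -> ['TERM=xterm', 'EMPTY']
format_extra_hosts({'db': '10.0.0.2'})                # -> ['db:10.0.0.2']
format_extra_hosts({'db': '10.0.0.2'}, task=True)     # -> ['10.0.0.2 db'] (Swarm task format)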
diff --git a/docker/version.py b/docker/version.py
index f576846..4259432 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "4.4.4"
-version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
+version = "5.0.3"
+version_info = tuple(int(d) for d in version.split("-")[0].split("."))