author     Ondřej Nový <novy@ondrej.org>    2016-03-23 07:23:09 +0100
committer  Ondřej Nový <novy@ondrej.org>    2016-03-23 07:23:09 +0100
commit     63875477fa2e0a529fcf52e36a3f9cb3db861000 (patch)
tree       4cd4d02c10be1a2125e1d1409312d643191a5ada /docker
parent     0db60a1e341b25c36bebb5903f0ce87126d7eee4 (diff)
Import python-docker_1.7.2.orig.tar.gz
Diffstat (limited to 'docker')
-rw-r--r--  docker/api/build.py              11
-rw-r--r--  docker/api/container.py          37
-rw-r--r--  docker/api/exec_api.py           19
-rw-r--r--  docker/api/image.py               5
-rw-r--r--  docker/api/network.py            30
-rw-r--r--  docker/api/volume.py              5
-rw-r--r--  docker/auth/auth.py             194
-rw-r--r--  docker/client.py                 34
-rw-r--r--  docker/constants.py               2
-rw-r--r--  docker/errors.py                  4
-rw-r--r--  docker/ssladapter/ssladapter.py  26
-rw-r--r--  docker/tls.py                    38
-rw-r--r--  docker/unixconn/unixconn.py      14
-rw-r--r--  docker/utils/__init__.py          7
-rw-r--r--  docker/utils/decorators.py       11
-rw-r--r--  docker/utils/utils.py           403
-rw-r--r--  docker/version.py                 2
17 files changed, 548 insertions, 294 deletions
diff --git a/docker/api/build.py b/docker/api/build.py
index b303ba6..6bfaba1 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -1,6 +1,7 @@
import logging
import os
import re
+import json
from .. import constants
from .. import errors
@@ -16,7 +17,7 @@ class BuildApiMixin(object):
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
- decode=False):
+ decode=False, buildargs=None):
remote = context = headers = None
container_limits = container_limits or {}
if path is None and fileobj is None:
@@ -71,6 +72,14 @@ class BuildApiMixin(object):
}
params.update(container_limits)
+ if buildargs:
+ if utils.version_gte(self._version, '1.21'):
+ params.update({'buildargs': json.dumps(buildargs)})
+ else:
+ raise errors.InvalidVersion(
+ 'buildargs was only introduced in API version 1.21'
+ )
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
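
The build() change above wires a new buildargs parameter through to the Engine's build-time variables (API 1.21+), JSON-encoding the dict into the request's query parameters. A minimal usage sketch; the tag, path, and variable values are illustrative:

    import docker

    client = docker.Client(version='1.21')

    # buildargs supplies values for ARG instructions in the Dockerfile;
    # on older API versions the call raises errors.InvalidVersion instead.
    for line in client.build(path='.', tag='myapp:latest', decode=True,
                             buildargs={'HTTP_PROXY': 'http://10.0.0.1:3128'}):
        print(line)
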
diff --git a/docker/api/container.py b/docker/api/container.py
index 142bd0f..ceac173 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,8 +1,10 @@
import six
import warnings
+from datetime import datetime
from .. import errors
from .. import utils
+from ..utils.utils import create_networking_config, create_endpoint_config
class ContainerApiMixin(object):
@@ -96,7 +98,8 @@ class ContainerApiMixin(object):
network_disabled=False, name=None, entrypoint=None,
cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None,
- mac_address=None, labels=None, volume_driver=None):
+ mac_address=None, labels=None, volume_driver=None,
+ stop_signal=None, networking_config=None):
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
@@ -111,7 +114,7 @@ class ContainerApiMixin(object):
tty, mem_limit, ports, environment, dns, volumes, volumes_from,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
- volume_driver
+ volume_driver, stop_signal, networking_config,
)
return self.create_container_from_config(config, name)
@@ -137,6 +140,12 @@ class ContainerApiMixin(object):
kwargs['version'] = self._version
return utils.create_host_config(*args, **kwargs)
+ def create_networking_config(self, *args, **kwargs):
+ return create_networking_config(*args, **kwargs)
+
+ def create_endpoint_config(self, *args, **kwargs):
+ return create_endpoint_config(self._version, *args, **kwargs)
+
@utils.check_resource
def diff(self, container):
return self._result(
@@ -184,7 +193,7 @@ class ContainerApiMixin(object):
@utils.check_resource
def logs(self, container, stdout=True, stderr=True, stream=False,
- timestamps=False, tail='all'):
+ timestamps=False, tail='all', since=None):
if utils.compare_version('1.11', self._version) >= 0:
params = {'stderr': stderr and 1 or 0,
'stdout': stdout and 1 or 0,
@@ -192,9 +201,20 @@ class ContainerApiMixin(object):
'follow': stream and 1 or 0,
}
if utils.compare_version('1.13', self._version) >= 0:
- if tail != 'all' and (not isinstance(tail, int) or tail <= 0):
+ if tail != 'all' and (not isinstance(tail, int) or tail < 0):
tail = 'all'
params['tail'] = tail
+
+ if since is not None:
+ if utils.compare_version('1.19', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'since is not supported in API < 1.19'
+ )
+ else:
+ if isinstance(since, datetime):
+ params['since'] = utils.datetime_to_timestamp(since)
+ elif (isinstance(since, int) and since > 0):
+ params['since'] = since
url = self._url("/containers/{0}/logs", container)
res = self._get(url, params=params, stream=stream)
return self._get_result(container, stream, res)
@@ -344,9 +364,14 @@ class ContainerApiMixin(object):
@utils.minimum_version('1.17')
@utils.check_resource
- def stats(self, container, decode=None):
+ def stats(self, container, decode=None, stream=True):
url = self._url("/containers/{0}/stats", container)
- return self._stream_helper(self._get(url, stream=True), decode=decode)
+ if stream:
+ return self._stream_helper(self._get(url, stream=True),
+ decode=decode)
+ else:
+ return self._result(self._get(url, params={'stream': False}),
+ json=True)
@utils.check_resource
def stop(self, container, timeout=10):
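
Two behavioural additions in this file: logs() accepts a since cutoff (a datetime or positive int, API >= 1.19), and stats() can return a single snapshot instead of a stream. A sketch, assuming a running container named 'web':

    from datetime import datetime, timedelta

    import docker

    client = docker.Client(version='1.21')

    # Only log lines from the last ten minutes; datetime values are
    # converted via utils.datetime_to_timestamp().
    logs = client.logs('web', since=datetime.utcnow() - timedelta(minutes=10))

    # stream=False sends ?stream=false and returns one stats dict rather
    # than a blocking generator.
    snapshot = client.stats('web', stream=False)
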
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index c66b9dd..f0e4afa 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -1,5 +1,3 @@
-import shlex
-
import six
from .. import errors
@@ -9,8 +7,8 @@ from .. import utils
class ExecApiMixin(object):
@utils.minimum_version('1.15')
@utils.check_resource
- def exec_create(self, container, cmd, stdout=True, stderr=True, tty=False,
- privileged=False, user=''):
+ def exec_create(self, container, cmd, stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=False, user=''):
if privileged and utils.compare_version('1.19', self._version) < 0:
raise errors.InvalidVersion(
'Privileged exec is not supported in API < 1.19'
@@ -20,14 +18,14 @@ class ExecApiMixin(object):
'User-specific exec is not supported in API < 1.19'
)
if isinstance(cmd, six.string_types):
- cmd = shlex.split(str(cmd))
+ cmd = utils.split_command(cmd)
data = {
'Container': container,
'User': user,
'Privileged': privileged,
'Tty': tty,
- 'AttachStdin': False,
+ 'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
'Cmd': cmd
@@ -55,7 +53,11 @@ class ExecApiMixin(object):
self._raise_for_status(res)
@utils.minimum_version('1.15')
- def exec_start(self, exec_id, detach=False, tty=False, stream=False):
+ def exec_start(self, exec_id, detach=False, tty=False, stream=False,
+ socket=False):
+ # we want opened socket if socket == True
+ if socket:
+ stream = True
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@@ -67,4 +69,7 @@ class ExecApiMixin(object):
res = self._post_json(
self._url('/exec/{0}/start', exec_id), data=data, stream=stream
)
+
+ if socket:
+ return self._get_raw_response_socket(res)
return self._get_result_tty(stream, res, tty)
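
exec_create() now exposes AttachStdin via a stdin flag, and exec_start() can hand back the raw response socket for bidirectional I/O (socket=True implies stream=True). A sketch, again using a hypothetical container 'web':

    import docker

    client = docker.Client(version='1.21')

    exec_id = client.exec_create('web', 'cat', stdin=True, tty=True)

    # With socket=True the caller receives the underlying socket object and
    # manages reads/writes itself instead of iterating an output generator.
    sock = client.exec_start(exec_id, socket=True)
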
diff --git a/docker/api/image.py b/docker/api/image.py
index f891e21..8493b38 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -158,8 +158,6 @@ class ImageApiMixin(object):
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
- if repo_name.count(":") == 1:
- repository, tag = repository.rsplit(":", 1)
params = {
'tag': tag,
@@ -174,7 +172,8 @@ class ImageApiMixin(object):
log.debug('Looking for auth config')
if not self._auth_configs:
log.debug(
- "No auth config in memory - loading from filesystem")
+ "No auth config in memory - loading from filesystem"
+ )
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this
diff --git a/docker/api/network.py b/docker/api/network.py
index 2dea679..d9a6128 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -1,6 +1,6 @@
import json
-from ..utils import check_resource, minimum_version
+from ..utils import check_resource, minimum_version, normalize_links
class NetworkApiMixin(object):
@@ -19,10 +19,15 @@ class NetworkApiMixin(object):
return self._result(res, json=True)
@minimum_version('1.21')
- def create_network(self, name, driver=None):
+ def create_network(self, name, driver=None, options=None, ipam=None):
+ if options is not None and not isinstance(options, dict):
+ raise TypeError('options must be a dictionary')
+
data = {
- 'name': name,
- 'driver': driver,
+ 'Name': name,
+ 'Driver': driver,
+ 'Options': options,
+ 'IPAM': ipam,
}
url = self._url("/networks/create")
res = self._post_json(url, data=data)
@@ -42,14 +47,23 @@ class NetworkApiMixin(object):
@check_resource
@minimum_version('1.21')
- def connect_container_to_network(self, container, net_id):
- data = {"container": container}
+ def connect_container_to_network(self, container, net_id,
+ aliases=None, links=None):
+ data = {
+ "Container": container,
+ "EndpointConfig": {
+ "Aliases": aliases,
+ "Links": normalize_links(links) if links else None,
+ },
+ }
url = self._url("/networks/{0}/connect", net_id)
- self._post_json(url, data=data)
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
@check_resource
@minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id):
data = {"container": container}
url = self._url("/networks/{0}/disconnect", net_id)
- self._post_json(url, data=data)
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
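
Besides fixing the payload casing (Name/Driver rather than name/driver), create_network() gains options and ipam parameters, and connect_container_to_network() now sends an EndpointConfig and checks the response status. A sketch using the new IPAM helpers from docker.utils; the subnet and names are made up:

    import docker
    from docker.utils import create_ipam_config, create_ipam_pool

    client = docker.Client(version='1.21')

    ipam = create_ipam_config(pool_configs=[
        create_ipam_pool(subnet='192.168.52.0/24', gateway='192.168.52.254')
    ])
    net = client.create_network('backend', driver='bridge', ipam=ipam)
    client.connect_container_to_network('web', net['Id'], aliases=['app'])
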
diff --git a/docker/api/volume.py b/docker/api/volume.py
index e9e7127..bb8b39b 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -5,14 +5,14 @@ class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def volumes(self, filters=None):
params = {
- 'filter': utils.convert_filters(filters) if filters else None
+ 'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/volumes')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.21')
def create_volume(self, name, driver=None, driver_opts=None):
- url = self._url('/volumes')
+ url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
@@ -33,4 +33,3 @@ class VolumeApiMixin(object):
url = self._url('/volumes/{0}', name)
resp = self._delete(url)
self._raise_for_status(resp)
- return True
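
Both fixes here are endpoint corrections: volume listing must pass 'filters' (not 'filter') as the query argument, and volume creation POSTs to /volumes/create. Illustrative calls, assuming API >= 1.21:

    import docker

    client = docker.Client(version='1.21')

    volume = client.create_volume(name='data', driver='local')
    dangling = client.volumes(filters={'dangling': True})
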
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index 1ee9f81..eedb794 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -13,18 +13,15 @@
# limitations under the License.
import base64
-import fileinput
import json
import logging
import os
-import warnings
import six
-from .. import constants
from .. import errors
-INDEX_NAME = 'index.docker.io'
+INDEX_NAME = 'docker.io'
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
@@ -32,31 +29,36 @@ LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
-def resolve_repository_name(repo_name, insecure=False):
- if insecure:
- warnings.warn(
- constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
- 'resolve_repository_name()'
- ), DeprecationWarning
- )
-
+def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
- 'Repository name cannot contain a scheme ({0})'.format(repo_name))
- parts = repo_name.split('/', 1)
- if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
- # This is a docker index repo (ex: foo/bar or ubuntu)
- return INDEX_NAME, repo_name
- if len(parts) < 2:
- raise errors.InvalidRepository(
- 'Invalid repository name ({0})'.format(repo_name))
+ 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ )
- if 'index.docker.io' in parts[0]:
+ index_name, remote_name = split_repo_name(repo_name)
+ if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
- 'Invalid repository name, try "{0}" instead'.format(parts[1])
+ 'Invalid index name ({0}). Cannot begin or end with a'
+ ' hyphen.'.format(index_name)
)
+ return resolve_index_name(index_name), remote_name
+
- return parts[0], parts[1]
+def resolve_index_name(index_name):
+ index_name = convert_to_hostname(index_name)
+ if index_name == 'index.' + INDEX_NAME:
+ index_name = INDEX_NAME
+ return index_name
+
+
+def split_repo_name(repo_name):
+ parts = repo_name.split('/', 1)
+ if len(parts) == 1 or (
+ '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+ ):
+ # This is a docker index repo (ex: username/foobar or ubuntu)
+ return INDEX_NAME, repo_name
+ return tuple(parts)
def resolve_authconfig(authconfig, registry=None):
@@ -67,7 +69,7 @@ def resolve_authconfig(authconfig, registry=None):
Returns None if no match was found.
"""
# Default to the public index server
- registry = convert_to_hostname(registry) if registry else INDEX_NAME
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
if registry in authconfig:
@@ -75,7 +77,7 @@ def resolve_authconfig(authconfig, registry=None):
return authconfig[registry]
for key, config in six.iteritems(authconfig):
- if convert_to_hostname(key) == registry:
+ if resolve_index_name(key) == registry:
log.debug("Found {0}".format(repr(key)))
return config
@@ -87,17 +89,12 @@ def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
-def encode_auth(auth_info):
- return base64.b64encode(auth_info.get('username', '') + b':' +
- auth_info.get('password', ''))
-
-
def decode_auth(auth):
if isinstance(auth, six.string_types):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
- return login.decode('ascii'), pwd.decode('ascii')
+ return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
@@ -105,12 +102,14 @@ def encode_header(auth):
return base64.urlsafe_b64encode(auth_json)
-def parse_auth(entries):
+def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
- entries: Dict of authentication entries.
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
Returns:
Authentication registry.
@@ -118,6 +117,19 @@ def parse_auth(entries):
conf = {}
for registry, entry in six.iteritems(entries):
+ if not (isinstance(entry, dict) and 'auth' in entry):
+ log.debug(
+ 'Config entry for key {0} is not auth config'.format(registry)
+ )
+ # We sometimes fall back to parsing the whole config as if it was
+ # the auth config by itself, for legacy purposes. In that case, we
+ # fail silently and return an empty conf if any of the keys is not
+ # formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {0}'.format(registry)
+ )
+ return {}
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
@@ -126,84 +138,90 @@ def parse_auth(entries):
conf[registry] = {
'username': username,
'password': password,
- 'email': entry['email'],
+ 'email': entry.get('email'),
'serveraddress': registry,
}
return conf
+def find_config_file(config_path=None):
+ environment_path = os.path.join(
+ os.environ.get('DOCKER_CONFIG'),
+ os.path.basename(DOCKER_CONFIG_FILENAME)
+ ) if os.environ.get('DOCKER_CONFIG') else None
+
+ paths = [
+ config_path, # 1
+ environment_path, # 2
+ os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(
+ os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
+ ) # 4
+ ]
+
+ for path in paths:
+ if path and os.path.exists(path):
+ return path
+ return None
+
+
def load_config(config_path=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment variable >
+ ~/.docker/config.json > ~/.dockercfg
"""
- conf = {}
- data = None
-
- # Prefer ~/.docker/config.json.
- config_file = config_path or os.path.join(os.path.expanduser('~'),
- DOCKER_CONFIG_FILENAME)
-
- log.debug("Trying {0}".format(config_file))
-
- if os.path.exists(config_file):
- try:
- with open(config_file) as f:
- for section, data in six.iteritems(json.load(f)):
- if section != 'auths':
- continue
- log.debug("Found 'auths' section")
- return parse_auth(data)
- log.debug("Couldn't find 'auths' section")
- except (IOError, KeyError, ValueError) as e:
- # Likely missing new Docker config file or it's in an
- # unknown format, continue to attempt to read old location
- # and format.
- log.debug(e)
- pass
- else:
- log.debug("File doesn't exist")
-
- config_file = config_path or os.path.join(os.path.expanduser('~'),
- LEGACY_DOCKER_CONFIG_FILENAME)
-
- log.debug("Trying {0}".format(config_file))
+ config_file = find_config_file(config_path)
- if not os.path.exists(config_file):
- log.debug("File doesn't exist - returning empty config")
+ if not config_file:
+ log.debug("File doesn't exist")
return {}
- log.debug("Attempting to parse as JSON")
try:
with open(config_file) as f:
- return parse_auth(json.load(f))
- except Exception as e:
+ data = json.load(f)
+ res = {}
+ if data.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update(parse_auth(data['auths'], raise_on_error=True))
+ if data.get('HttpHeaders'):
+ log.debug("Found 'HttpHeaders' section")
+ res.update({'HttpHeaders': data['HttpHeaders']})
+ if res:
+ return res
+ else:
+ log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
+ f.seek(0)
+ return parse_auth(json.load(f))
+ except (IOError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
log.debug(e)
- pass
- # If that fails, we assume the configuration file contains a single
- # authentication token for the public registry in the following format:
- #
- # auth = AUTH_TOKEN
- # email = email@domain.com
log.debug("Attempting to parse legacy auth file format")
try:
data = []
- for line in fileinput.input(config_file):
- data.append(line.strip().split(' = ')[1])
- if len(data) < 2:
- # Not enough data
- raise errors.InvalidConfigFile(
- 'Invalid or empty configuration file!')
+ with open(config_file) as f:
+ for line in f.readlines():
+ data.append(line.strip().split(' = ')[1])
+ if len(data) < 2:
+ # Not enough data
+ raise errors.InvalidConfigFile(
+ 'Invalid or empty configuration file!'
+ )
username, password = decode_auth(data[0])
- conf[INDEX_NAME] = {
- 'username': username,
- 'password': password,
- 'email': data[1],
- 'serveraddress': INDEX_URL,
+ return {
+ INDEX_NAME: {
+ 'username': username,
+ 'password': password,
+ 'email': data[1],
+ 'serveraddress': INDEX_URL,
+ }
}
- return conf
except Exception as e:
log.debug(e)
pass
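
The refactored name resolution splits a repository reference into an index name and a remote name, canonicalising 'index.docker.io' to the new INDEX_NAME 'docker.io'. Illustrative results, per the logic above:

    from docker.auth import auth

    auth.resolve_repository_name('ubuntu')
    # ('docker.io', 'ubuntu')

    auth.resolve_repository_name('registry.example.com:5000/team/app')
    # ('registry.example.com:5000', 'team/app')
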
diff --git a/docker/client.py b/docker/client.py
index d219472..7d1f7c4 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -28,7 +28,7 @@ from . import errors
from .auth import auth
from .unixconn import unixconn
from .ssladapter import ssladapter
-from .utils import utils, check_resource
+from .utils import utils, check_resource, update_headers
from .tls import TLSConfig
@@ -45,17 +45,17 @@ class Client(
timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
super(Client, self).__init__()
- if tls and not base_url.startswith('https://'):
+ if tls and not base_url:
raise errors.TLSParameterError(
- 'If using TLS, the base_url argument must begin with '
- '"https://".')
+ 'If using TLS, the base_url argument must be provided.'
+ )
self.base_url = base_url
self.timeout = timeout
self._auth_configs = auth.load_config()
- base_url = utils.parse_host(base_url, sys.platform)
+ base_url = utils.parse_host(base_url, sys.platform, tls=bool(tls))
if base_url.startswith('http+unix://'):
self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
self.mount('http+docker://', self._custom_adapter)
@@ -103,15 +103,19 @@ class Client(
kwargs.setdefault('timeout', self.timeout)
return kwargs
+ @update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
+ @update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
+ @update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
+ @update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
@@ -188,6 +192,8 @@ class Client(
self._raise_for_status(response)
if six.PY3:
sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try:
@@ -244,10 +250,7 @@ class Client(
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
- if six.PY3:
- socket._sock.settimeout(None)
- else:
- socket.settimeout(None)
+ self._disable_socket_timeout(socket)
while True:
header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
@@ -276,6 +279,19 @@ class Client(
for out in response.iter_content(chunk_size=1, decode_unicode=True):
yield out
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it.
+
+ To avoid missing the correct one, we try both.
+ """
+ if hasattr(socket, "settimeout"):
+ socket.settimeout(None)
+ if hasattr(socket, "_sock") and hasattr(socket._sock, "settimeout"):
+ socket._sock.settimeout(None)
+
def _get_result(self, container, stream, res):
cont = self.inspect_container(container)
return self._get_result_tty(stream, res, cont['Config']['Tty'])
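
With parse_host() now receiving a tls flag, a tcp:// address is promoted to https:// when TLS is enabled, so base_url no longer has to start with 'https://' explicitly. A sketch of building a client from the standard environment variables:

    import docker
    from docker.utils import kwargs_from_env

    # DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH determine the
    # resulting base_url and TLSConfig.
    client = docker.Client(**kwargs_from_env())
    print(client.version())
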
diff --git a/docker/constants.py b/docker/constants.py
index 3647a3b..0627ba0 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,4 +1,4 @@
-DEFAULT_DOCKER_API_VERSION = '1.20'
+DEFAULT_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
diff --git a/docker/errors.py b/docker/errors.py
index 066406a..e85910c 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -80,8 +80,8 @@ class TLSParameterError(DockerException):
def __str__(self):
return self.msg + (". TLS configurations should map the Docker CLI "
"client configurations. See "
- "http://docs.docker.com/examples/https/ for "
- "API details.")
+ "https://docs.docker.com/engine/articles/https/ "
+ "for API details.")
class NullResource(DockerException, ValueError):
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
index 3a70a91..5b43aa2 100644
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -4,7 +4,6 @@
"""
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
-import ssl
try:
import requests.packages.urllib3 as urllib3
@@ -14,20 +13,10 @@ except ImportError:
PoolManager = urllib3.poolmanager.PoolManager
-def get_max_tls_protocol():
- protocols = ('PROTOCOL_TLSv1_2',
- 'PROTOCOL_TLSv1_1',
- 'PROTOCOL_TLSv1')
- for proto in protocols:
- if hasattr(ssl, proto):
- return getattr(ssl, proto)
-
-
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
- ssl_version = ssl_version or get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -41,11 +30,24 @@ class SSLAdapter(HTTPAdapter):
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
- if self.can_override_ssl_version():
+ if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
+ def get_connection(self, *args, **kwargs):
+ """
+ Ensure assert_hostname is set correctly on our pool
+
+ We already take care of a normal poolmanager via init_poolmanager
+
+ But we still need to take care of when there is a proxy poolmanager
+ """
+ conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
+ if conn.assert_hostname != self.assert_hostname:
+ conn.assert_hostname = self.assert_hostname
+ return conn
+
def can_override_ssl_version(self):
urllib_ver = urllib3.__version__.split('-')[0]
if urllib_ver is None:
diff --git a/docker/tls.py b/docker/tls.py
index d888b7d..83b0ff7 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -6,6 +6,7 @@ from .ssladapter import ssladapter
class TLSConfig(object):
cert = None
+ ca_cert = None
verify = None
ssl_version = None
@@ -13,16 +14,11 @@ class TLSConfig(object):
ssl_version=None, assert_hostname=None,
assert_fingerprint=None):
# Argument compatibility/mapping with
- # http://docs.docker.com/examples/https/
+ # https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
- # urllib3 sets a default ssl_version if ssl_version is None,
- # but that default is the vulnerable PROTOCOL_SSLv23 selection,
- # so we override the default with the maximum supported in the running
- # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)
- ssl_version = ssl_version or ssladapter.get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
@@ -48,29 +44,25 @@ class TLSConfig(object):
)
self.cert = (tls_cert, tls_key)
- # Either set verify to True (public/default CA checks) or to the
- # path of a CA Cert file.
- if verify is not None:
- if not ca_cert:
- self.verify = verify
- elif os.path.isfile(ca_cert):
- if not verify:
- raise errors.TLSParameterError(
- 'verify can not be False when a CA cert is'
- ' provided.'
- )
- self.verify = ca_cert
- else:
- raise errors.TLSParameterError(
- 'Invalid CA certificate provided for `tls_ca_cert`.'
- )
+ # If verify is set, make sure the cert exists
+ self.verify = verify
+ self.ca_cert = ca_cert
+ if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
+ raise errors.TLSParameterError(
+ 'Invalid CA certificate provided for `tls_ca_cert`.'
+ )
def configure_client(self, client):
client.ssl_version = self.ssl_version
- if self.verify is not None:
+
+ if self.verify and self.ca_cert:
+ client.verify = self.ca_cert
+ else:
client.verify = self.verify
+
if self.cert:
client.cert = self.cert
+
client.mount('https://', ssladapter.SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
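
TLSConfig no longer forces a maximum TLS protocol version (urllib3 chooses unless ssl_version is given), and verify/ca_cert are now stored separately. A configuration sketch; the paths and host address are placeholders:

    import docker
    from docker.tls import TLSConfig

    tls_config = TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),
        ca_cert='/certs/ca.pem',
        verify=True,  # verify against ca.pem instead of the default CA pool
    )
    client = docker.Client(base_url='tcp://192.168.99.100:2376', tls=tls_config)
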
diff --git a/docker/unixconn/unixconn.py b/docker/unixconn/unixconn.py
index 551bd29..d7e249e 100644
--- a/docker/unixconn/unixconn.py
+++ b/docker/unixconn/unixconn.py
@@ -73,12 +73,20 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
if pool:
return pool
- pool = UnixHTTPConnectionPool(url,
- self.socket_path,
- self.timeout)
+ pool = UnixHTTPConnectionPool(
+ url, self.socket_path, self.timeout
+ )
self.pools[url] = pool
return pool
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, like is the case when using a UNIX socket.
+ # Since proxies are an irrelevant notion in the case of UNIX sockets
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-py/issues/811
+ return request.path_url
+
def close(self):
self.pools.clear()
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 92e03e9..ccc3819 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,10 +1,11 @@
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
- kwargs_from_env, convert_filters, create_host_config,
+ kwargs_from_env, convert_filters, datetime_to_timestamp, create_host_config,
create_container_config, parse_bytes, ping_registry, parse_env_file,
- version_lt, version_gte, decode_json_header
+ version_lt, version_gte, decode_json_header, split_command,
+ create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
) # flake8: noqa
from .types import Ulimit, LogConfig # flake8: noqa
-from .decorators import check_resource, minimum_version #flake8: noqa
+from .decorators import check_resource, minimum_version, update_headers #flake8: noqa
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 7d3b01a..7c41a5f 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -35,3 +35,14 @@ def minimum_version(version):
return f(self, *args, **kwargs)
return wrapper
return decorator
+
+
+def update_headers(f):
+ def inner(self, *args, **kwargs):
+ if 'HttpHeaders' in self._auth_configs:
+ if 'headers' not in kwargs:
+ kwargs['headers'] = self._auth_configs['HttpHeaders']
+ else:
+ kwargs['headers'].update(self._auth_configs['HttpHeaders'])
+ return f(self, *args, **kwargs)
+ return inner
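
The new update_headers decorator merges any HttpHeaders section that load_config() found in the Docker config file into every request issued through _get/_post/_put/_delete. A sketch; the header name is illustrative:

    # Assuming ~/.docker/config.json contains:
    #   {"HttpHeaders": {"X-Meta-Source": "docker-py"}, "auths": {}}

    import docker

    client = docker.Client()
    client.info()  # the request now carries X-Meta-Source: docker-py
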
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 89837b7..6fcf037 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -44,6 +44,23 @@ BYTE_UNITS = {
}
+def create_ipam_pool(subnet=None, iprange=None, gateway=None,
+ aux_addresses=None):
+ return {
+ 'Subnet': subnet,
+ 'IPRange': iprange,
+ 'Gateway': gateway,
+ 'AuxiliaryAddresses': aux_addresses
+ }
+
+
+def create_ipam_config(driver='default', pool_configs=None):
+ return {
+ 'Driver': driver,
+ 'Config': pool_configs or []
+ }
+
+
def mkbuildcontext(dockerfile):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
@@ -74,9 +91,10 @@ def decode_json_header(header):
return json.loads(data)
-def tar(path, exclude=None, dockerfile=None):
- f = tempfile.NamedTemporaryFile()
- t = tarfile.open(mode='w', fileobj=f)
+def tar(path, exclude=None, dockerfile=None, fileobj=None):
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=fileobj)
root = os.path.abspath(path)
exclude = exclude or []
@@ -85,8 +103,8 @@ def tar(path, exclude=None, dockerfile=None):
t.add(os.path.join(root, path), arcname=path, recursive=False)
t.close()
- f.seek(0)
- return f
+ fileobj.seek(0)
+ return fileobj
def exclude_paths(root, patterns, dockerfile=None):
@@ -107,38 +125,74 @@ def exclude_paths(root, patterns, dockerfile=None):
exclude_patterns = list(set(patterns) - set(exceptions))
- all_paths = get_paths(root)
-
- # Remove all paths that are matched by any exclusion pattern
- paths = [
- p for p in all_paths
- if not any(match_path(p, pattern) for pattern in exclude_patterns)
- ]
-
- # Add back the set of paths that are matched by any inclusion pattern.
- # Include parent dirs - if we add back 'foo/bar', add 'foo' as well
- for p in all_paths:
- if any(match_path(p, pattern) for pattern in include_patterns):
- components = p.split('/')
- paths += [
- '/'.join(components[:end])
- for end in range(1, len(components) + 1)
- ]
+ paths = get_paths(root, exclude_patterns, include_patterns,
+ has_exceptions=len(exceptions) > 0)
+
+ return set(paths).union(
+ # If the Dockerfile is in a subdirectory that is excluded, get_paths
+ # will not descend into it and the file will be skipped. This ensures
+ # it doesn't happen.
+ set([dockerfile])
+ if os.path.exists(os.path.join(root, dockerfile)) else set()
+ )
- return set(paths)
+def should_include(path, exclude_patterns, include_patterns):
+ """
+ Given a path, a list of exclude patterns, and a list of inclusion patterns:
-def get_paths(root):
+ 1. Returns True if the path doesn't match any exclusion pattern
+ 2. Returns False if the path matches an exclusion pattern and doesn't match
+ an inclusion pattern
+ 3. Returns true if the path matches an exclusion pattern and matches an
+ inclusion pattern
+ """
+ for pattern in exclude_patterns:
+ if match_path(path, pattern):
+ for pattern in include_patterns:
+ if match_path(path, pattern):
+ return True
+ return False
+ return True
+
+
+def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
paths = []
- for parent, dirs, files in os.walk(root, followlinks=False):
+ for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
parent = os.path.relpath(parent, root)
if parent == '.':
parent = ''
+
+ # If exception rules exist, we can't skip recursing into ignored
+ # directories, as we need to look for exceptions in them.
+ #
+ # It may be possible to optimize this further for exception patterns
+ # that *couldn't* match within ignored directores.
+ #
+ # This matches the current docker logic (as of 2015-11-24):
+ # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
+
+ if not has_exceptions:
+
+ # Remove excluded patterns from the list of directories to traverse
+ # by mutating the dirs we're iterating over.
+ # This looks strange, but is considered the correct way to skip
+ # traversal. See https://docs.python.org/2/library/os.html#os.walk
+
+ dirs[:] = [d for d in dirs if
+ should_include(os.path.join(parent, d),
+ exclude_patterns, include_patterns)]
+
for path in dirs:
- paths.append(os.path.join(parent, path))
+ if should_include(os.path.join(parent, path),
+ exclude_patterns, include_patterns):
+ paths.append(os.path.join(parent, path))
+
for path in files:
- paths.append(os.path.join(parent, path))
+ if should_include(os.path.join(parent, path),
+ exclude_patterns, include_patterns):
+ paths.append(os.path.join(parent, path))
return paths
@@ -236,7 +290,7 @@ def convert_port_bindings(port_bindings):
for k, v in six.iteritems(port_bindings):
key = str(k)
if '/' not in key:
- key = key + '/tcp'
+ key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
@@ -283,23 +337,21 @@ def convert_volume_binds(binds):
return result
-def parse_repository_tag(repo):
- column_index = repo.rfind(':')
- if column_index < 0:
- return repo, None
- tag = repo[column_index + 1:]
- slash_index = tag.find('/')
- if slash_index < 0:
- return repo[:column_index], tag
-
- return repo, None
+def parse_repository_tag(repo_name):
+ parts = repo_name.rsplit('@', 1)
+ if len(parts) == 2:
+ return tuple(parts)
+ parts = repo_name.rsplit(':', 1)
+ if len(parts) == 2 and '/' not in parts[1]:
+ return tuple(parts)
+ return repo_name, None
# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
-def parse_host(addr, platform=None):
+def parse_host(addr, platform=None, tls=False):
proto = "http+unix"
host = DEFAULT_HTTP_HOST
port = None
@@ -335,7 +387,7 @@ def parse_host(addr, platform=None):
raise errors.DockerException(
"Invalid bind address protocol: {0}".format(addr)
)
- proto = "http"
+ proto = "https" if tls else "http"
if proto != "http+unix" and ":" in addr:
host_parts = addr.split(':')
@@ -354,7 +406,7 @@ def parse_host(addr, platform=None):
port = int(port)
except Exception:
raise errors.DockerException(
- "Invalid port: %s", addr
+ "Invalid port: {0}".format(addr)
)
elif proto in ("http", "https") and ':' not in addr:
@@ -371,7 +423,14 @@ def parse_host(addr, platform=None):
def parse_devices(devices):
device_list = []
for device in devices:
- device_mapping = device.split(":")
+ if isinstance(device, dict):
+ device_list.append(device)
+ continue
+ if not isinstance(device, six.string_types):
+ raise errors.DockerException(
+ 'Invalid device type {0}'.format(type(device))
+ )
+ device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
@@ -382,34 +441,55 @@ def parse_devices(devices):
permissions = device_mapping[2]
else:
permissions = 'rwm'
- device_list.append({"PathOnHost": path_on_host,
- "PathInContainer": path_in_container,
- "CgroupPermissions": permissions})
+ device_list.append({
+ 'PathOnHost': path_on_host,
+ 'PathInContainer': path_in_container,
+ 'CgroupPermissions': permissions
+ })
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None):
host = os.environ.get('DOCKER_HOST')
- cert_path = os.environ.get('DOCKER_CERT_PATH')
+
+ # empty string for cert path is the same as unset.
+ cert_path = os.environ.get('DOCKER_CERT_PATH') or None
+
+ # empty string for tls verify counts as "false".
+ # Any value or 'unset' counts as true.
tls_verify = os.environ.get('DOCKER_TLS_VERIFY')
+ if tls_verify == '':
+ tls_verify = False
+ enable_tls = True
+ else:
+ tls_verify = tls_verify is not None
+ enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = (host.replace('tcp://', 'https://')
- if tls_verify else host)
+ if enable_tls else host)
+
+ if not enable_tls:
+ return params
- if tls_verify and not cert_path:
+ if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
- if tls_verify and cert_path:
- params['tls'] = tls.TLSConfig(
- client_cert=(os.path.join(cert_path, 'cert.pem'),
- os.path.join(cert_path, 'key.pem')),
- ca_cert=os.path.join(cert_path, 'ca.pem'),
- verify=True,
- ssl_version=ssl_version,
- assert_hostname=assert_hostname)
+ if not tls_verify and assert_hostname is None:
+ # assert_hostname is a subset of TLS verification,
+ # so if it's not set already then set it to false.
+ assert_hostname = False
+
+ params['tls'] = tls.TLSConfig(
+ client_cert=(os.path.join(cert_path, 'cert.pem'),
+ os.path.join(cert_path, 'key.pem')),
+ ca_cert=os.path.join(cert_path, 'ca.pem'),
+ verify=tls_verify,
+ ssl_version=ssl_version,
+ assert_hostname=assert_hostname,
+ )
return params
@@ -431,12 +511,18 @@ def datetime_to_timestamp(dt):
return delta.seconds + delta.days * 24 * 3600
+def longint(n):
+ if six.PY3:
+ return int(n)
+ return long(n)
+
+
def parse_bytes(s):
if len(s) == 0:
s = 0
else:
if s[-2:-1].isalpha() and s[-1].isalpha():
- if (s[-1] == "b" or s[-1] == "B"):
+ if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
@@ -451,34 +537,53 @@ def parse_bytes(s):
if suffix in units.keys() or suffix.isdigit():
try:
- digits = int(digits_part)
+ digits = longint(digits_part)
except ValueError:
- message = ('Failed converting the string value for'
- 'memory ({0}) to a number.')
- formatted_message = message.format(digits_part)
- raise errors.DockerException(formatted_message)
+ raise errors.DockerException(
+ 'Failed converting the string value for memory ({0}) to'
+ ' an integer.'.format(digits_part)
+ )
- s = digits * units[suffix]
+ # Reconvert to long for the final result
+ s = longint(digits * units[suffix])
else:
- message = ('The specified value for memory'
- ' ({0}) should specify the units. The postfix'
- ' should be one of the `b` `k` `m` `g`'
- ' characters')
- raise errors.DockerException(message.format(s))
+ raise errors.DockerException(
+ 'The specified value for memory ({0}) should specify the'
+ ' units. The postfix should be one of the `b` `k` `m` `g`'
+ ' characters'.format(s)
+ )
return s
-def create_host_config(
- binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=False, links=None, privileged=False,
- dns=None, dns_search=None, volumes_from=None, network_mode=None,
- restart_policy=None, cap_add=None, cap_drop=None, devices=None,
- extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None, log_config=None, mem_limit=None,
- memswap_limit=None, cgroup_parent=None, group_add=None, cpu_quota=None,
- cpu_period=None, version=None
-):
+def host_config_type_error(param, param_value, expected):
+ error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
+ return TypeError(error_msg.format(param, expected, type(param_value)))
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ error_msg = '{0} param is not supported in API versions {1} {2}'
+ return errors.InvalidVersion(error_msg.format(param, operator, version))
+
+
+def host_config_value_error(param, param_value):
+ error_msg = 'Invalid value for {0} param: {1}'
+ return ValueError(error_msg.format(param, param_value))
+
+
+def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
+ publish_all_ports=False, links=None, privileged=False,
+ dns=None, dns_search=None, volumes_from=None,
+ network_mode=None, restart_policy=None, cap_add=None,
+ cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_swappiness=None,
+ cgroup_parent=None, group_add=None, cpu_quota=None,
+ cpu_period=None, oom_kill_disable=False, shm_size=None,
+ version=None):
+
host_config = {}
if not version:
@@ -491,17 +596,33 @@ def create_host_config(
if mem_limit is not None:
if isinstance(mem_limit, six.string_types):
mem_limit = parse_bytes(mem_limit)
+
host_config['Memory'] = mem_limit
if memswap_limit is not None:
if isinstance(memswap_limit, six.string_types):
memswap_limit = parse_bytes(memswap_limit)
+
host_config['MemorySwap'] = memswap_limit
+ if mem_swappiness is not None:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('mem_swappiness', '1.20')
+ if not isinstance(mem_swappiness, int):
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
+ )
+
+ host_config['MemorySwappiness'] = mem_swappiness
+
+ if shm_size is not None:
+ if isinstance(shm_size, six.string_types):
+ shm_size = parse_bytes(shm_size)
+
+ host_config['ShmSize'] = shm_size
+
if pid_mode not in (None, 'host'):
- raise errors.DockerException(
- 'Invalid value for pid param: {0}'.format(pid_mode)
- )
+ raise host_config_value_error('pid_mode', pid_mode)
elif pid_mode:
host_config['PidMode'] = pid_mode
@@ -511,6 +632,12 @@ def create_host_config(
if privileged:
host_config['Privileged'] = privileged
+ if oom_kill_disable:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('oom_kill_disable', '1.19')
+
+ host_config['OomKillDisable'] = oom_kill_disable
+
if publish_all_ports:
host_config['PublishAllPorts'] = publish_all_ports
@@ -526,6 +653,11 @@ def create_host_config(
host_config['NetworkMode'] = 'default'
if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
host_config['RestartPolicy'] = restart_policy
if cap_add:
@@ -539,9 +671,8 @@ def create_host_config(
if group_add:
if version_lt(version, '1.20'):
- raise errors.InvalidVersion(
- 'group_add param not supported for API version < 1.20'
- )
+ raise host_config_version_error('group_add', '1.20')
+
host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
@@ -549,24 +680,21 @@ def create_host_config(
if security_opt is not None:
if not isinstance(security_opt, list):
- raise errors.DockerException(
- 'Invalid type for security_opt param: expected list but found'
- ' {0}'.format(type(security_opt))
- )
+ raise host_config_type_error('security_opt', security_opt, 'list')
+
host_config['SecurityOpt'] = security_opt
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
+
host_config['VolumesFrom'] = volumes_from
if binds is not None:
host_config['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
- host_config['PortBindings'] = convert_port_bindings(
- port_bindings
- )
+ host_config['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
@@ -578,14 +706,7 @@ def create_host_config(
host_config['ExtraHosts'] = extra_hosts
if links is not None:
- if isinstance(links, dict):
- links = six.iteritems(links)
-
- formatted_links = [
- '{0}:{1}'.format(k, v) for k, v in sorted(links)
- ]
-
- host_config['Links'] = formatted_links
+ host_config['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
@@ -601,10 +722,7 @@ def create_host_config(
if ulimits is not None:
if not isinstance(ulimits, list):
- raise errors.DockerException(
- 'Invalid type for ulimits param: expected list but found'
- ' {0}'.format(type(ulimits))
- )
+ raise host_config_type_error('ulimits', ulimits, 'list')
host_config['Ulimits'] = []
for l in ulimits:
if not isinstance(l, Ulimit):
@@ -614,40 +732,64 @@ def create_host_config(
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
- raise errors.DockerException(
- 'Invalid type for log_config param: expected LogConfig but'
- ' found {0}'.format(type(log_config))
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
+
host_config['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
- raise TypeError(
- 'Invalid type for cpu_quota param: expected int but'
- ' found {0}'.format(type(cpu_quota))
- )
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
if version_lt(version, '1.19'):
- raise errors.InvalidVersion(
- 'cpu_quota param not supported for API version < 1.19'
- )
+ raise host_config_version_error('cpu_quota', '1.19')
+
host_config['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
- raise TypeError(
- 'Invalid type for cpu_period param: expected int but'
- ' found {0}'.format(type(cpu_period))
- )
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
if version_lt(version, '1.19'):
- raise errors.InvalidVersion(
- 'cpu_period param not supported for API version < 1.19'
- )
+ raise host_config_version_error('cpu_period', '1.19')
+
host_config['CpuPeriod'] = cpu_period
return host_config
+def normalize_links(links):
+ if isinstance(links, dict):
+ links = six.iteritems(links)
+
+ return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
+
+
+def create_networking_config(endpoints_config=None):
+ networking_config = {}
+
+ if endpoints_config:
+ networking_config["EndpointsConfig"] = endpoints_config
+
+ return networking_config
+
+
+def create_endpoint_config(version, aliases=None, links=None):
+ endpoint_config = {}
+
+ if aliases:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('endpoint_config.aliases', '1.22')
+ endpoint_config["Aliases"] = aliases
+
+ if links:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('endpoint_config.links', '1.22')
+ endpoint_config["Links"] = normalize_links(links)
+
+ return endpoint_config
+
+
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
@@ -673,19 +815,25 @@ def parse_env_file(env_file):
return environment
+def split_command(command):
+ if six.PY2 and not isinstance(command, six.binary_type):
+ command = command.encode('utf-8')
+ return shlex.split(command)
+
+
def create_container_config(
version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
dns=None, volumes=None, volumes_from=None, network_disabled=False,
entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
- labels=None, volume_driver=None
+ labels=None, volume_driver=None, stop_signal=None, networking_config=None,
):
if isinstance(command, six.string_types):
- command = shlex.split(str(command))
+ command = split_command(command)
if isinstance(entrypoint, six.string_types):
- entrypoint = shlex.split(str(entrypoint))
+ entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = [
@@ -698,6 +846,11 @@ def create_container_config(
'labels were only introduced in API version 1.18'
)
+ if stop_signal is not None and compare_version('1.21', version) < 0:
+ raise errors.InvalidVersion(
+ 'stop_signal was only introduced in API version 1.21'
+ )
+
if compare_version('1.19', version) < 0:
if volume_driver is not None:
raise errors.InvalidVersion(
@@ -768,7 +921,7 @@ def create_container_config(
if compare_version('1.10', version) >= 0:
message = ('{0!r} parameter has no effect on create_container().'
- ' It has been moved to start()')
+ ' It has been moved to host_config')
if dns is not None:
raise errors.InvalidVersion(message.format('dns'))
if volumes_from is not None:
@@ -800,7 +953,9 @@ def create_container_config(
'WorkingDir': working_dir,
'MemorySwap': memswap_limit,
'HostConfig': host_config,
+ 'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'VolumeDriver': volume_driver,
+ 'StopSignal': stop_signal
}
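
Among the many utils changes, parse_repository_tag() was rewritten to handle @digest references and registry hosts with ports. Expected results, per the new implementation:

    from docker.utils import parse_repository_tag

    parse_repository_tag('ubuntu:14.04')
    # ('ubuntu', '14.04')

    parse_repository_tag('localhost:5000/app')
    # ('localhost:5000/app', None) -- the colon belongs to the host, not a tag

    parse_repository_tag('busybox@sha256:7cc4b5aefd1d')
    # ('busybox', 'sha256:7cc4b5aefd1d')
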
diff --git a/docker/version.py b/docker/version.py
index 4ebafdd..f98f42a 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.5.0"
+version = "1.7.2"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])