author     SVN-Git Migration <python-modules-team@lists.alioth.debian.org>  2015-10-08 11:55:21 -0700
committer  SVN-Git Migration <python-modules-team@lists.alioth.debian.org>  2015-10-08 11:55:21 -0700
commit     0c74f7ce9339080d4f27ee39365bc109bada0efb (patch)
tree       05ed2be1f5c3a96255baeaa638d2e477ce894ab7 /docker
parent     7cd7ca7e903ee2dfb1a91ec118f8653b2310ec7d (diff)
Imported Upstream version 1.2.2
Diffstat (limited to 'docker')
-rw-r--r--  docker/__init__.py               |   4
-rw-r--r--  docker/auth/__init__.py          |   2
-rw-r--r--  docker/auth/auth.py              |  72
-rw-r--r--  docker/client.py                 | 825
-rw-r--r--  docker/constants.py              |   6
-rw-r--r--  docker/errors.py                 |  14
-rw-r--r--  docker/ssladapter/ssladapter.py  |  18
-rw-r--r--  docker/tls.py                    |  12
-rw-r--r--  docker/unixconn/unixconn.py      |  53
-rw-r--r--  docker/utils/__init__.py         |   7
-rw-r--r--  docker/utils/decorators.py       |  16
-rw-r--r--  docker/utils/ports/__init__.py   |   4
-rw-r--r--  docker/utils/ports/ports.py      |  84
-rw-r--r--  docker/utils/types.py            | 100
-rw-r--r--  docker/utils/utils.py            | 368
-rw-r--r--  docker/version.py                |   3
16 files changed, 1191 insertions(+), 397 deletions(-)
diff --git a/docker/__init__.py b/docker/__init__.py
index 343766d..3844c81 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from .version import version
+from .version import version, version_info
__version__ = version
__title__ = 'docker-py'
-from .client import Client # flake8: noqa
+from .client import Client, AutoVersionClient # flake8: noqa
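A minimal usage sketch of the new top-level exports, assuming a Docker daemon is listening on the default local socket (AutoVersionClient queries /version while it is being constructed):

    import docker

    print(docker.version, docker.version_info)    # e.g. '1.2.2', (1, 2, 2)

    cli = docker.Client(base_url='unix://var/run/docker.sock')
    auto = docker.AutoVersionClient(base_url='unix://var/run/docker.sock')
    print(auto.api_version)                       # API version reported by the daemon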
diff --git a/docker/auth/__init__.py b/docker/auth/__init__.py
index 66acdb3..d068b7f 100644
--- a/docker/auth/__init__.py
+++ b/docker/auth/__init__.py
@@ -3,5 +3,5 @@ from .auth import (
encode_header,
load_config,
resolve_authconfig,
- resolve_repository_name
+ resolve_repository_name,
 ) # flake8: noqa
\ No newline at end of file
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index c13eed4..373df56 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -26,23 +26,13 @@ INDEX_URL = 'https://index.docker.io/v1/'
DOCKER_CONFIG_FILENAME = '.dockercfg'
-def swap_protocol(url):
- if url.startswith('http://'):
- return url.replace('http://', 'https://', 1)
- if url.startswith('https://'):
- return url.replace('https://', 'http://', 1)
- return url
-
-
def expand_registry_url(hostname, insecure=False):
if hostname.startswith('http:') or hostname.startswith('https:'):
- if '/' not in hostname[9:]:
- hostname = hostname + '/v1/'
return hostname
- if utils.ping('https://' + hostname + '/v1/_ping'):
- return 'https://' + hostname + '/v1/'
+ if utils.ping_registry('https://' + hostname):
+ return 'https://' + hostname
elif insecure:
- return 'http://' + hostname + '/v1/'
+ return 'http://' + hostname
else:
raise errors.DockerException(
"HTTPS endpoint unresponsive and insecure mode isn't enabled."
@@ -61,7 +51,7 @@ def resolve_repository_name(repo_name, insecure=False):
raise errors.InvalidRepository(
'Invalid repository name ({0})'.format(repo_name))
- if 'index.docker.io' in parts[0] or 'registry.hub.docker.com' in parts[0]:
+ if 'index.docker.io' in parts[0]:
raise errors.InvalidRepository(
'Invalid repository name, try "{0}" instead'.format(parts[1])
)
@@ -70,29 +60,27 @@ def resolve_repository_name(repo_name, insecure=False):
def resolve_authconfig(authconfig, registry=None):
- """Return the authentication data from the given auth configuration for a
- specific registry. We'll do our best to infer the correct URL for the
- registry, trying both http and https schemes. Returns an empty dictionnary
- if no data exists."""
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the config
+ with full URLs are stripped down to hostnames before checking for a match.
+ Returns None if no match was found.
+ """
# Default to the public index server
- registry = registry or INDEX_URL
-
- # Ff its not the index server there are three cases:
- #
- # 1. this is a full config url -> it should be used as is
- # 2. it could be a full url, but with the wrong protocol
- # 3. it can be the hostname optionally with a port
- #
- # as there is only one auth entry which is fully qualified we need to start
- # parsing and matching
- if '/' not in registry:
- registry = registry + '/v1/'
- if not registry.startswith('http:') and not registry.startswith('https:'):
- registry = 'https://' + registry
+ registry = convert_to_hostname(registry) if registry else INDEX_URL
if registry in authconfig:
return authconfig[registry]
- return authconfig.get(swap_protocol(registry), None)
+
+ for key, config in six.iteritems(authconfig):
+ if convert_to_hostname(key) == registry:
+ return config
+
+ return None
+
+
+def convert_to_hostname(url):
+ return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
def encode_auth(auth_info):
@@ -104,7 +92,7 @@ def decode_auth(auth):
if isinstance(auth, six.string_types):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
- login, pwd = s.split(b':')
+ login, pwd = s.split(b':', 1)
return login.decode('ascii'), pwd.decode('ascii')
@@ -119,14 +107,20 @@ def encode_full_header(auth):
return encode_header({'configs': auth})
-def load_config(root=None):
- """Loads authentication data from a Docker configuration file in the given
- root directory."""
+def load_config(config_path=None):
+ """
+ Loads authentication data from a Docker configuration file in the given
+ root directory or if config_path is passed use given path.
+ """
conf = {}
data = None
- config_file = os.path.join(root or os.environ.get('HOME', '.'),
- DOCKER_CONFIG_FILENAME)
+ config_file = config_path or os.path.join(os.environ.get('HOME', '.'),
+ DOCKER_CONFIG_FILENAME)
+
+ # if config path doesn't exist return empty config
+ if not os.path.exists(config_file):
+ return {}
# First try as JSON
try:
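A small sketch of the reworked lookup, using a hypothetical registry hostname: legacy .dockercfg entries keyed by full URLs are now reduced to hostnames before matching:

    from docker.auth import auth

    cfg = {'https://registry.example.com/v1/': {'username': 'alice', 'password': 'secret'}}

    print(auth.convert_to_hostname('https://registry.example.com/v1/'))
    # -> 'registry.example.com'
    print(auth.resolve_authconfig(cfg, registry='registry.example.com'))
    # -> {'username': 'alice', 'password': 'secret'}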
diff --git a/docker/client.py b/docker/client.py
index 4ff91d8..2d1349c 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -17,56 +17,86 @@ import os
import re
import shlex
import struct
-from socket import socket as socket_obj
import warnings
+from datetime import datetime
import requests
import requests.exceptions
import six
+from . import constants
+from . import errors
from .auth import auth
from .unixconn import unixconn
from .ssladapter import ssladapter
-from .utils import utils
-from . import errors
+from .utils import utils, check_resource
from .tls import TLSConfig
+
if not six.PY3:
import websocket
-DEFAULT_DOCKER_API_VERSION = '1.12'
-DEFAULT_TIMEOUT_SECONDS = 60
-STREAM_HEADER_SIZE_BYTES = 8
-
class Client(requests.Session):
- def __init__(self, base_url=None, version=DEFAULT_DOCKER_API_VERSION,
- timeout=DEFAULT_TIMEOUT_SECONDS, tls=False):
+ def __init__(self, base_url=None, version=None,
+ timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
super(Client, self).__init__()
- base_url = utils.parse_host(base_url)
- if 'http+unix:///' in base_url:
- base_url = base_url.replace('unix:/', 'unix:')
+
if tls and not base_url.startswith('https://'):
raise errors.TLSParameterError(
'If using TLS, the base_url argument must begin with '
'"https://".')
+
self.base_url = base_url
- self._version = version
- self._timeout = timeout
+ self.timeout = timeout
+
self._auth_configs = auth.load_config()
- # Use SSLAdapter for the ability to specify SSL version
- if isinstance(tls, TLSConfig):
- tls.configure_client(self)
- elif tls:
- self.mount('https://', ssladapter.SSLAdapter())
+ base_url = utils.parse_host(base_url)
+ if base_url.startswith('http+unix://'):
+ unix_socket_adapter = unixconn.UnixAdapter(base_url, timeout)
+ self.mount('http+docker://', unix_socket_adapter)
+ self.base_url = 'http+docker://localunixsocket'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self.mount('https://', ssladapter.SSLAdapter())
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None:
+ self._version = constants.DEFAULT_DOCKER_API_VERSION
+ elif isinstance(version, six.string_types):
+ if version.lower() == 'auto':
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
else:
- self.mount('http+unix://', unixconn.UnixAdapter(base_url, timeout))
+ raise errors.DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError:
+ raise errors.DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ )
+ except Exception as e:
+ raise errors.DockerException(
+ 'Error while fetching server API version: {0}'.format(e)
+ )
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
- kwargs.setdefault('timeout', self._timeout)
+ kwargs.setdefault('timeout', self.timeout)
return kwargs
def _post(self, url, **kwargs):
@@ -78,8 +108,11 @@ class Client(requests.Session):
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
- def _url(self, path):
- return '{0}/v{1}{2}'.format(self.base_url, self._version, path)
+ def _url(self, path, versioned_api=True):
+ if versioned_api:
+ return '{0}/v{1}{2}'.format(self.base_url, self._version, path)
+ else:
+ return '{0}{1}'.format(self.base_url, path)
def _raise_for_status(self, response, explanation=None):
"""Raises stored :class:`APIError`, if one occurred."""
@@ -98,129 +131,6 @@ class Client(requests.Session):
return response.content
return response.text
- def _container_config(self, image, command, hostname=None, user=None,
- detach=False, stdin_open=False, tty=False,
- mem_limit=0, ports=None, environment=None, dns=None,
- volumes=None, volumes_from=None,
- network_disabled=False, entrypoint=None,
- cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=0):
- if isinstance(command, six.string_types):
- command = shlex.split(str(command))
- if isinstance(environment, dict):
- environment = [
- '{0}={1}'.format(k, v) for k, v in environment.items()
- ]
-
- if isinstance(mem_limit, six.string_types):
- if len(mem_limit) == 0:
- mem_limit = 0
- else:
- units = {'b': 1,
- 'k': 1024,
- 'm': 1024 * 1024,
- 'g': 1024 * 1024 * 1024}
- suffix = mem_limit[-1].lower()
-
- # Check if the variable is a string representation of an int
- # without a units part. Assuming that the units are bytes.
- if suffix.isdigit():
- digits_part = mem_limit
- suffix = 'b'
- else:
- digits_part = mem_limit[:-1]
-
- if suffix in units.keys() or suffix.isdigit():
- try:
- digits = int(digits_part)
- except ValueError:
- message = ('Failed converting the string value for'
- ' mem_limit ({0}) to a number.')
- formatted_message = message.format(digits_part)
- raise errors.DockerException(formatted_message)
-
- mem_limit = digits * units[suffix]
- else:
- message = ('The specified value for mem_limit parameter'
- ' ({0}) should specify the units. The postfix'
- ' should be one of the `b` `k` `m` `g`'
- ' characters')
- raise errors.DockerException(message.format(mem_limit))
-
- if isinstance(ports, list):
- exposed_ports = {}
- for port_definition in ports:
- port = port_definition
- proto = 'tcp'
- if isinstance(port_definition, tuple):
- if len(port_definition) == 2:
- proto = port_definition[1]
- port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
- ports = exposed_ports
-
- if isinstance(volumes, six.string_types):
- volumes = [volumes, ]
-
- if isinstance(volumes, list):
- volumes_dict = {}
- for vol in volumes:
- volumes_dict[vol] = {}
- volumes = volumes_dict
-
- if volumes_from:
- if not isinstance(volumes_from, six.string_types):
- volumes_from = ','.join(volumes_from)
- else:
- # Force None, an empty list or dict causes client.start to fail
- volumes_from = None
-
- attach_stdin = False
- attach_stdout = False
- attach_stderr = False
- stdin_once = False
-
- if not detach:
- attach_stdout = True
- attach_stderr = True
-
- if stdin_open:
- attach_stdin = True
- stdin_once = True
-
- if utils.compare_version('1.10', self._version) >= 0:
- message = ('{0!r} parameter has no effect on create_container().'
- ' It has been moved to start()')
- if dns is not None:
- raise errors.DockerException(message.format('dns'))
- if volumes_from is not None:
- raise errors.DockerException(message.format('volumes_from'))
-
- return {
- 'Hostname': hostname,
- 'Domainname': domainname,
- 'ExposedPorts': ports,
- 'User': user,
- 'Tty': tty,
- 'OpenStdin': stdin_open,
- 'StdinOnce': stdin_once,
- 'Memory': mem_limit,
- 'AttachStdin': attach_stdin,
- 'AttachStdout': attach_stdout,
- 'AttachStderr': attach_stderr,
- 'Env': environment,
- 'Cmd': command,
- 'Dns': dns,
- 'Image': image,
- 'Volumes': volumes,
- 'VolumesFrom': volumes_from,
- 'NetworkDisabled': network_disabled,
- 'Entrypoint': entrypoint,
- 'CpuShares': cpu_shares,
- 'WorkingDir': working_dir,
- 'MemorySwap': memswap_limit
- }
-
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
@@ -242,6 +152,7 @@ class Client(requests.Session):
'stream': 1
}
+ @check_resource
def _attach_websocket(self, container, params=None):
if six.PY3:
raise NotImplementedError("This method is not currently supported "
@@ -259,40 +170,41 @@ class Client(requests.Session):
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if six.PY3:
- sock = response.raw._fp.fp.raw._sock
+ sock = response.raw._fp.fp.raw
else:
sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
- # collected. If the response is garbage collected, it will close
- # TLS sockets.
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
sock._response = response
except AttributeError:
- # UNIX sockets can't have attributes set on them, but that's fine
- # because we won't be doing TLS over them
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
pass
return sock
- def _stream_helper(self, response):
+ def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
- socket_fp = socket_obj(_sock=self._get_raw_response_socket(response))
- socket_fp.setblocking(1)
- socket = socket_fp.makefile()
- while True:
- # Because Docker introduced newlines at the end of chunks in v0.9,
- # and only on some API endpoints, we have to cater for both cases.
- size_line = socket.readline()
- if size_line == '\r\n' or size_line == '\n':
- size_line = socket.readline()
-
- size = int(size_line, 16)
- if size <= 0:
- break
- data = socket.readline()
- if not data:
- break
- yield data
+ if response.raw._fp.chunked:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ if decode:
+ if six.PY3:
+ data = data.decode('utf-8')
+ data = json.loads(data)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
@@ -303,43 +215,40 @@ class Client(requests.Session):
if len(buf[walker:]) < 8:
break
_, length = struct.unpack_from('>BxxxL', buf[walker:])
- start = walker + STREAM_HEADER_SIZE_BYTES
+ start = walker + constants.STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
- def _multiplexed_socket_stream_helper(self, response):
+ def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
- socket."""
- socket = self._get_raw_response_socket(response)
-
- def recvall(socket, size):
- blocks = []
- while size > 0:
- block = socket.recv(size)
- if not block:
- return None
-
- blocks.append(block)
- size -= len(block)
+ stream."""
- sep = bytes() if six.PY3 else str()
- data = sep.join(blocks)
- return data
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ if six.PY3:
+ socket._sock.settimeout(None)
+ else:
+ socket.settimeout(None)
while True:
- socket.settimeout(None)
- header = recvall(socket, STREAM_HEADER_SIZE_BYTES)
+ header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
if not header:
break
_, length = struct.unpack('>BxxxL', header)
if not length:
break
- data = recvall(socket, length)
+ data = response.raw.read(length)
if not data:
break
yield data
+ @property
+ def api_version(self):
+ return self._version
+
+ @check_resource
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
if isinstance(container, dict):
@@ -369,9 +278,14 @@ class Client(requests.Session):
sep = bytes() if six.PY3 else str()
- return stream and self._multiplexed_socket_stream_helper(response) or \
- sep.join([x for x in self._multiplexed_buffer_helper(response)])
+ if stream:
+ return self._multiplexed_response_stream_helper(response)
+ else:
+ return sep.join(
+ [x for x in self._multiplexed_buffer_helper(response)]
+ )
+ @check_resource
def attach_socket(self, container, params=None, ws=False):
if params is None:
params = {
@@ -392,11 +306,19 @@ class Client(requests.Session):
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, stream=False, timeout=None,
- custom_context=False, encoding=None):
+ custom_context=False, encoding=None, pull=True,
+ forcerm=False, dockerfile=None, container_limits=None):
remote = context = headers = None
+ container_limits = container_limits or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
+ for key in container_limits.keys():
+ if key not in constants.CONTAINER_LIMITS_KEYS:
+ raise errors.DockerException(
+ 'Invalid container_limits key {0}'.format(key)
+ )
+
if custom_context:
if not fileobj:
raise TypeError("You must specify fileobj with custom_context")
@@ -406,25 +328,42 @@ class Client(requests.Session):
elif path.startswith(('http://', 'https://',
'git://', 'github.com/')):
remote = path
+ elif not os.path.isdir(path):
+ raise TypeError("You must specify a directory to build in path")
else:
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore, 'r') as f:
- exclude = list(filter(bool, f.read().split('\n')))
+ exclude = list(filter(bool, f.read().splitlines()))
+ # These are handled by the docker daemon and should not be
+ # excluded on the client
+ if 'Dockerfile' in exclude:
+ exclude.remove('Dockerfile')
+ if '.dockerignore' in exclude:
+ exclude.remove(".dockerignore")
context = utils.tar(path, exclude=exclude)
if utils.compare_version('1.8', self._version) >= 0:
stream = True
+ if dockerfile and utils.compare_version('1.17', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'dockerfile was only introduced in API version 1.17'
+ )
+
u = self._url('/build')
params = {
't': tag,
'remote': remote,
'q': quiet,
'nocache': nocache,
- 'rm': rm
+ 'rm': rm,
+ 'forcerm': forcerm,
+ 'pull': pull,
+ 'dockerfile': dockerfile,
}
+ params.update(container_limits)
if context is not None:
headers = {'Content-Type': 'application/tar'}
@@ -440,6 +379,8 @@ class Client(requests.Session):
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
+ if headers is None:
+ headers = {}
headers['X-Registry-Config'] = auth.encode_full_header(
self._auth_configs
)
@@ -453,7 +394,7 @@ class Client(requests.Session):
timeout=timeout,
)
- if context is not None:
+ if context is not None and not custom_context:
context.close()
if stream:
@@ -466,8 +407,11 @@ class Client(requests.Session):
return None, output
return match.group(1), output
+ @check_resource
def commit(self, container, repository=None, tag=None, message=None,
author=None, conf=None):
+ if isinstance(container, dict):
+ container = container.get('Id')
params = {
'container': container,
'repo': repository,
@@ -479,8 +423,9 @@ class Client(requests.Session):
return self._result(self._post_json(u, data=conf, params=params),
json=True)
- def containers(self, quiet=False, all=False, trunc=True, latest=False,
- since=None, before=None, limit=-1, size=False):
+ def containers(self, quiet=False, all=False, trunc=False, latest=False,
+ since=None, before=None, limit=-1, size=False,
+ filters=None):
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
@@ -489,13 +434,19 @@ class Client(requests.Session):
'since': since,
'before': before
}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
u = self._url("/containers/json")
res = self._result(self._get(u, params=params), True)
if quiet:
return [{'Id': x['Id']} for x in res]
+ if trunc:
+ for x in res:
+ x['Id'] = x['Id'][:12]
return res
+ @check_resource
def copy(self, container, resource):
if isinstance(container, dict):
container = container.get('Id')
@@ -513,15 +464,22 @@ class Client(requests.Session):
volumes=None, volumes_from=None,
network_disabled=False, name=None, entrypoint=None,
cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=0):
+ memswap_limit=0, cpuset=None, host_config=None,
+ mac_address=None, labels=None):
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
- config = self._container_config(
- image, command, hostname, user, detach, stdin_open, tty, mem_limit,
- ports, environment, dns, volumes, volumes_from, network_disabled,
- entrypoint, cpu_shares, working_dir, domainname, memswap_limit
+ if host_config and utils.compare_version('1.15', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'host_config is not supported in API < 1.15'
+ )
+
+ config = utils.create_container_config(
+ self._version, image, command, hostname, user, detach, stdin_open,
+ tty, mem_limit, ports, environment, dns, volumes, volumes_from,
+ network_disabled, entrypoint, cpu_shares, working_dir, domainname,
+ memswap_limit, cpuset, host_config, mac_address, labels
)
return self.create_container_from_config(config, name)
@@ -533,15 +491,114 @@ class Client(requests.Session):
res = self._post_json(u, data=config, params=params)
return self._result(res, True)
+ @check_resource
def diff(self, container):
if isinstance(container, dict):
container = container.get('Id')
return self._result(self._get(self._url("/containers/{0}/changes".
format(container))), True)
- def events(self):
- return self._stream_helper(self.get(self._url('/events'), stream=True))
+ def events(self, since=None, until=None, filters=None, decode=None):
+ if isinstance(since, datetime):
+ since = utils.datetime_to_timestamp(since)
+
+ if isinstance(until, datetime):
+ until = utils.datetime_to_timestamp(until)
+
+ if filters:
+ filters = utils.convert_filters(filters)
+
+ params = {
+ 'since': since,
+ 'until': until,
+ 'filters': filters
+ }
+
+ return self._stream_helper(self.get(self._url('/events'),
+ params=params, stream=True),
+ decode=decode)
+
+ @check_resource
+ def execute(self, container, cmd, detach=False, stdout=True, stderr=True,
+ stream=False, tty=False):
+ warnings.warn(
+ 'Client.execute is being deprecated. Please use exec_create & '
+ 'exec_start instead', DeprecationWarning
+ )
+ create_res = self.exec_create(
+ container, cmd, detach, stdout, stderr, tty
+ )
+
+ return self.exec_start(create_res, detach, tty, stream)
+
+ def exec_create(self, container, cmd, stdout=True, stderr=True, tty=False):
+ if utils.compare_version('1.15', self._version) < 0:
+ raise errors.InvalidVersion('Exec is not supported in API < 1.15')
+ if isinstance(container, dict):
+ container = container.get('Id')
+ if isinstance(cmd, six.string_types):
+ cmd = shlex.split(str(cmd))
+
+ data = {
+ 'Container': container,
+ 'User': '',
+ 'Privileged': False,
+ 'Tty': tty,
+ 'AttachStdin': False,
+ 'AttachStdout': stdout,
+ 'AttachStderr': stderr,
+ 'Cmd': cmd
+ }
+
+ url = self._url('/containers/{0}/exec'.format(container))
+ res = self._post_json(url, data=data)
+ return self._result(res, True)
+
+ def exec_inspect(self, exec_id):
+ if utils.compare_version('1.15', self._version) < 0:
+ raise errors.InvalidVersion('Exec is not supported in API < 1.15')
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+ res = self._get(self._url("/exec/{0}/json".format(exec_id)))
+ return self._result(res, True)
+
+ def exec_resize(self, exec_id, height=None, width=None):
+ if utils.compare_version('1.15', self._version) < 0:
+ raise errors.InvalidVersion('Exec is not supported in API < 1.15')
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+
+ params = {'h': height, 'w': width}
+ url = self._url("/exec/{0}/resize".format(exec_id))
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ def exec_start(self, exec_id, detach=False, tty=False, stream=False):
+ if utils.compare_version('1.15', self._version) < 0:
+ raise errors.InvalidVersion('Exec is not supported in API < 1.15')
+ if isinstance(exec_id, dict):
+ exec_id = exec_id.get('Id')
+
+ data = {
+ 'Tty': tty,
+ 'Detach': detach
+ }
+
+ res = self._post_json(self._url('/exec/{0}/start'.format(exec_id)),
+ data=data, stream=stream)
+ self._raise_for_status(res)
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ elif six.PY3:
+ return bytes().join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+ else:
+ return str().join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+ @check_resource
def export(self, container):
if isinstance(container, dict):
container = container.get('Id')
@@ -550,18 +607,20 @@ class Client(requests.Session):
self._raise_for_status(res)
return res.raw
+ @check_resource
def get_image(self, image):
res = self._get(self._url("/images/{0}/get".format(image)),
stream=True)
self._raise_for_status(res)
return res.raw
+ @check_resource
def history(self, image):
res = self._get(self._url("/images/{0}/history".format(image)))
- self._raise_for_status(res)
- return self._result(res)
+ return self._result(res, True)
- def images(self, name=None, quiet=False, all=False, viz=False):
+ def images(self, name=None, quiet=False, all=False, viz=False,
+ filters=None):
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
@@ -571,6 +630,8 @@ class Client(requests.Session):
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
@@ -578,50 +639,105 @@ class Client(requests.Session):
return res
def import_image(self, src=None, repository=None, tag=None, image=None):
+ if src:
+ if isinstance(src, six.string_types):
+ try:
+ result = self.import_image_from_file(
+ src, repository=repository, tag=tag)
+ except IOError:
+ result = self.import_image_from_url(
+ src, repository=repository, tag=tag)
+ else:
+ result = self.import_image_from_data(
+ src, repository=repository, tag=tag)
+ elif image:
+ result = self.import_image_from_image(
+ image, repository=repository, tag=tag)
+ else:
+ raise Exception("Must specify a src or image")
+
+ return result
+
+ def import_image_from_data(self, data, repository=None, tag=None):
u = self._url("/images/create")
params = {
+ 'fromSrc': '-',
'repo': repository,
'tag': tag
}
+ headers = {
+ 'Content-Type': 'application/tar',
+ }
+ return self._result(
+ self._post(u, data=data, params=params, headers=headers))
- if src:
- try:
- # XXX: this is ways not optimal but the only way
- # for now to import tarballs through the API
- fic = open(src)
- data = fic.read()
- fic.close()
- src = "-"
- except IOError:
- # file does not exists or not a file (URL)
- data = None
- if isinstance(src, six.string_types):
- params['fromSrc'] = src
- return self._result(self._post(u, data=data, params=params))
- return self._result(self._post(u, data=src, params=params))
+ def import_image_from_file(self, filename, repository=None, tag=None):
+ u = self._url("/images/create")
+ params = {
+ 'fromSrc': '-',
+ 'repo': repository,
+ 'tag': tag
+ }
+ headers = {
+ 'Content-Type': 'application/tar',
+ }
+ with open(filename, 'rb') as f:
+ return self._result(
+ self._post(u, data=f, params=params, headers=headers,
+ timeout=None))
+
+ def import_image_from_stream(self, stream, repository=None, tag=None):
+ u = self._url("/images/create")
+ params = {
+ 'fromSrc': '-',
+ 'repo': repository,
+ 'tag': tag
+ }
+ headers = {
+ 'Content-Type': 'application/tar',
+ 'Transfer-Encoding': 'chunked',
+ }
+ return self._result(
+ self._post(u, data=stream, params=params, headers=headers))
- if image:
- params['fromImage'] = image
- return self._result(self._post(u, data=None, params=params))
+ def import_image_from_url(self, url, repository=None, tag=None):
+ u = self._url("/images/create")
+ params = {
+ 'fromSrc': url,
+ 'repo': repository,
+ 'tag': tag
+ }
+ return self._result(
+ self._post(u, data=None, params=params))
- raise Exception("Must specify a src or image")
+ def import_image_from_image(self, image, repository=None, tag=None):
+ u = self._url("/images/create")
+ params = {
+ 'fromImage': image,
+ 'repo': repository,
+ 'tag': tag
+ }
+ return self._result(
+ self._post(u, data=None, params=params))
def info(self):
return self._result(self._get(self._url("/info")),
True)
+ @check_resource
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
- api_url = self._url("/images/" + image + "/insert")
+        api_url = self._url("/images/{0}/insert".format(image))
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
+ @check_resource
def inspect_container(self, container):
if isinstance(container, dict):
container = container.get('Id')
@@ -629,12 +745,14 @@ class Client(requests.Session):
self._get(self._url("/containers/{0}/json".format(container))),
True)
- def inspect_image(self, image_id):
+ @check_resource
+ def inspect_image(self, image):
return self._result(
- self._get(self._url("/images/{0}/json".format(image_id))),
+ self._get(self._url("/images/{0}/json".format(image))),
True
)
+ @check_resource
def kill(self, container, signal=None):
if isinstance(container, dict):
container = container.get('Id')
@@ -651,10 +769,14 @@ class Client(requests.Session):
self._raise_for_status(res)
def login(self, username, password=None, email=None, registry=None,
- reauth=False):
+ reauth=False, insecure_registry=False, dockercfg_path=None):
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
- if not self._auth_configs:
+ # If dockercfg_path is passed check to see if the config file exists,
+ # if so load that config.
+ if dockercfg_path and os.path.exists(dockercfg_path):
+ self._auth_configs = auth.load_config(dockercfg_path)
+ elif not self._auth_configs:
self._auth_configs = auth.load_config()
registry = registry or auth.INDEX_URL
@@ -678,19 +800,25 @@ class Client(requests.Session):
self._auth_configs[registry] = req_data
return self._result(response, json=True)
+ @check_resource
def logs(self, container, stdout=True, stderr=True, stream=False,
- timestamps=False):
+ timestamps=False, tail='all'):
if isinstance(container, dict):
container = container.get('Id')
if utils.compare_version('1.11', self._version) >= 0:
params = {'stderr': stderr and 1 or 0,
'stdout': stdout and 1 or 0,
'timestamps': timestamps and 1 or 0,
- 'follow': stream and 1 or 0}
+ 'follow': stream and 1 or 0,
+ }
+ if utils.compare_version('1.13', self._version) >= 0:
+ if tail != 'all' and (not isinstance(tail, int) or tail <= 0):
+ tail = 'all'
+ params['tail'] = tail
url = self._url("/containers/{0}/logs".format(container))
res = self._get(url, params=params, stream=stream)
if stream:
- return self._multiplexed_socket_stream_helper(res)
+ return self._multiplexed_response_stream_helper(res)
elif six.PY3:
return bytes().join(
[x for x in self._multiplexed_buffer_helper(res)]
@@ -707,9 +835,18 @@ class Client(requests.Session):
logs=True
)
+ @check_resource
+ def pause(self, container):
+ if isinstance(container, dict):
+ container = container.get('Id')
+ url = self._url('/containers/{0}/pause'.format(container))
+ res = self._post(url)
+ self._raise_for_status(res)
+
def ping(self):
return self._result(self._get(self._url('/_ping')))
+ @check_resource
def port(self, container, private_port):
if isinstance(container, dict):
container = container.get('Id')
@@ -719,14 +856,20 @@ class Client(requests.Session):
s_port = str(private_port)
h_ports = None
- h_ports = json_['NetworkSettings']['Ports'].get(s_port + '/udp')
+ # Port settings is None when the container is running with
+ # network_mode=host.
+ port_settings = json_.get('NetworkSettings', {}).get('Ports')
+ if port_settings is None:
+ return None
+
+ h_ports = port_settings.get(s_port + '/udp')
if h_ports is None:
- h_ports = json_['NetworkSettings']['Ports'].get(s_port + '/tcp')
+ h_ports = port_settings.get(s_port + '/tcp')
return h_ports
def pull(self, repository, tag=None, stream=False,
- insecure_registry=False):
+ insecure_registry=False, auth_config=None):
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(
@@ -744,15 +887,21 @@ class Client(requests.Session):
if utils.compare_version('1.5', self._version) >= 0:
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
- if not self._auth_configs:
- self._auth_configs = auth.load_config()
- authcfg = auth.resolve_authconfig(self._auth_configs, registry)
-
- # Do not fail here if no authentication exists for this specific
- # registry as we can have a readonly pull. Just put the header if
- # we can.
- if authcfg:
- headers['X-Registry-Auth'] = auth.encode_header(authcfg)
+ if auth_config is None:
+ if not self._auth_configs:
+ self._auth_configs = auth.load_config()
+ authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+ # Do not fail here if no authentication exists for this
+ # specific registry as we can have a readonly pull. Just
+ # put the header if we can.
+ if authcfg:
+ # auth_config needs to be a dict in the format used by
+ # auth.py username , password, serveraddress, email
+ headers['X-Registry-Auth'] = auth.encode_header(
+ authcfg
+ )
+ else:
+ headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(self._url('/images/create'), params=params,
headers=headers, stream=stream, timeout=None)
@@ -796,6 +945,7 @@ class Client(requests.Session):
return stream and self._stream_helper(response) \
or self._result(response)
+ @check_resource
def remove_container(self, container, v=False, link=False, force=False):
if isinstance(container, dict):
container = container.get('Id')
@@ -804,11 +954,38 @@ class Client(requests.Session):
params=params)
self._raise_for_status(res)
+ @check_resource
def remove_image(self, image, force=False, noprune=False):
+ if isinstance(image, dict):
+ image = image.get('Id')
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/" + image), params=params)
self._raise_for_status(res)
+ @check_resource
+ def rename(self, container, name):
+ if utils.compare_version('1.17', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'rename was only introduced in API version 1.17'
+ )
+ if isinstance(container, dict):
+ container = container.get('Id')
+ url = self._url("/containers/{0}/rename".format(container))
+ params = {'name': name}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @check_resource
+ def resize(self, container, height, width):
+ if isinstance(container, dict):
+ container = container.get('Id')
+
+ params = {'h': height, 'w': width}
+ url = self._url("/containers/{0}/resize".format(container))
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
+ @check_resource
def restart(self, container, timeout=10):
if isinstance(container, dict):
container = container.get('Id')
@@ -822,99 +999,92 @@ class Client(requests.Session):
params={'term': term}),
True)
+ @check_resource
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
publish_all_ports=False, links=None, privileged=False,
dns=None, dns_search=None, volumes_from=None, network_mode=None,
- restart_policy=None, cap_add=None, cap_drop=None):
- if isinstance(container, dict):
- container = container.get('Id')
-
- if isinstance(lxc_conf, dict):
- formatted = []
- for k, v in six.iteritems(lxc_conf):
- formatted.append({'Key': k, 'Value': str(v)})
- lxc_conf = formatted
+ restart_policy=None, cap_add=None, cap_drop=None, devices=None,
+ extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None):
- start_config = {
- 'LxcConf': lxc_conf
- }
- if binds:
- start_config['Binds'] = utils.convert_volume_binds(binds)
-
- if port_bindings:
- start_config['PortBindings'] = utils.convert_port_bindings(
- port_bindings
- )
-
- start_config['PublishAllPorts'] = publish_all_ports
-
- if links:
- if isinstance(links, dict):
- links = six.iteritems(links)
-
- formatted_links = [
- '{0}:{1}'.format(k, v) for k, v in sorted(links)
- ]
-
- start_config['Links'] = formatted_links
-
- start_config['Privileged'] = privileged
-
- if utils.compare_version('1.10', self._version) >= 0:
+ if utils.compare_version('1.10', self._version) < 0:
if dns is not None:
- start_config['Dns'] = dns
+ raise errors.InvalidVersion(
+ 'dns is only supported for API version >= 1.10'
+ )
if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
- volumes_from = volumes_from.split(',')
- start_config['VolumesFrom'] = volumes_from
- else:
- warning_message = ('{0!r} parameter is discarded. It is only'
- ' available for API version greater or equal'
- ' than 1.10')
+ raise errors.InvalidVersion(
+ 'volumes_from is only supported for API version >= 1.10'
+ )
- if dns is not None:
- warnings.warn(warning_message.format('dns'),
- DeprecationWarning)
- if volumes_from is not None:
- warnings.warn(warning_message.format('volumes_from'),
- DeprecationWarning)
- if dns_search:
- start_config['DnsSearch'] = dns_search
+ if utils.compare_version('1.15', self._version) < 0:
+ if security_opt is not None:
+ raise errors.InvalidVersion(
+ 'security_opt is only supported for API version >= 1.15'
+ )
+ if ipc_mode:
+ raise errors.InvalidVersion(
+ 'ipc_mode is only supported for API version >= 1.15'
+ )
- if network_mode:
- start_config['NetworkMode'] = network_mode
+ if utils.compare_version('1.17', self._version) < 0:
+ if read_only is not None:
+ raise errors.InvalidVersion(
+ 'read_only is only supported for API version >= 1.17'
+ )
+ if pid_mode is not None:
+ raise errors.InvalidVersion(
+ 'pid_mode is only supported for API version >= 1.17'
+ )
- if restart_policy:
- start_config['RestartPolicy'] = restart_policy
+ if utils.compare_version('1.18', self._version) < 0:
+ if ulimits is not None:
+ raise errors.InvalidVersion(
+ 'ulimits is only supported for API version >= 1.18'
+ )
- if cap_add:
- start_config['CapAdd'] = cap_add
+ start_config = utils.create_host_config(
+ binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
+ publish_all_ports=publish_all_ports, links=links, dns=dns,
+ privileged=privileged, dns_search=dns_search, cap_add=cap_add,
+ cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
+ network_mode=network_mode, restart_policy=restart_policy,
+ extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
+ ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
+ )
- if cap_drop:
- start_config['CapDrop'] = cap_drop
+ if isinstance(container, dict):
+ container = container.get('Id')
url = self._url("/containers/{0}/start".format(container))
+ if not start_config:
+ start_config = None
res = self._post_json(url, data=start_config)
self._raise_for_status(res)
- def resize(self, container, height, width):
+ @check_resource
+ def stats(self, container, decode=None):
+ if utils.compare_version('1.17', self._version) < 0:
+ raise errors.InvalidVersion(
+ 'Stats retrieval is not supported in API < 1.17!')
+
if isinstance(container, dict):
container = container.get('Id')
+ url = self._url("/containers/{0}/stats".format(container))
+ return self._stream_helper(self._get(url, stream=True), decode=decode)
- params = {'h': height, 'w': width}
- url = self._url("/containers/{0}/resize".format(container))
- res = self._post(url, params=params)
- self._raise_for_status(res)
-
+ @check_resource
def stop(self, container, timeout=10):
if isinstance(container, dict):
container = container.get('Id')
params = {'t': timeout}
url = self._url("/containers/{0}/stop".format(container))
+
res = self._post(url, params=params,
- timeout=(timeout + self._timeout))
+ timeout=(timeout + self.timeout))
self._raise_for_status(res)
+ @check_resource
def tag(self, image, repository, tag=None, force=False):
params = {
'tag': tag,
@@ -926,20 +1096,43 @@ class Client(requests.Session):
self._raise_for_status(res)
return res.status_code == 201
+ @check_resource
def top(self, container):
+ if isinstance(container, dict):
+ container = container.get('Id')
u = self._url("/containers/{0}/top".format(container))
return self._result(self._get(u), True)
- def version(self):
- return self._result(self._get(self._url("/version")), True)
+ def version(self, api_version=True):
+ url = self._url("/version", versioned_api=api_version)
+ return self._result(self._get(url), json=True)
- def wait(self, container):
+ @check_resource
+ def unpause(self, container):
+ if isinstance(container, dict):
+ container = container.get('Id')
+ url = self._url('/containers/{0}/unpause'.format(container))
+ res = self._post(url)
+ self._raise_for_status(res)
+
+ @check_resource
+ def wait(self, container, timeout=None):
if isinstance(container, dict):
container = container.get('Id')
url = self._url("/containers/{0}/wait".format(container))
- res = self._post(url, timeout=None)
+ res = self._post(url, timeout=timeout)
self._raise_for_status(res)
json_ = res.json()
if 'StatusCode' in json_:
return json_['StatusCode']
return -1
+
+
+class AutoVersionClient(Client):
+ def __init__(self, *args, **kwargs):
+ if 'version' in kwargs and kwargs['version']:
+ raise errors.DockerException(
+ 'Can not specify version for AutoVersionClient'
+ )
+ kwargs['version'] = 'auto'
+ super(AutoVersionClient, self).__init__(*args, **kwargs)
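A sketch of the exec_create/exec_start flow that replaces the deprecated Client.execute(); it assumes a reachable daemon speaking API >= 1.15 and an already-running container named 'worker' (both hypothetical):

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='1.15')
    exec_id = cli.exec_create('worker', cmd='echo hello')
    print(cli.exec_start(exec_id))      # b'hello\n'
    print(cli.exec_inspect(exec_id))    # dict with ExitCode, Running, ProcessConfig, ...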
diff --git a/docker/constants.py b/docker/constants.py
new file mode 100644
index 0000000..233d9b1
--- /dev/null
+++ b/docker/constants.py
@@ -0,0 +1,6 @@
+DEFAULT_DOCKER_API_VERSION = '1.18'
+DEFAULT_TIMEOUT_SECONDS = 60
+STREAM_HEADER_SIZE_BYTES = 8
+CONTAINER_LIMITS_KEYS = [
+ 'memory', 'memswap', 'cpushares', 'cpusetcpus'
+]
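A sketch of passing the new container_limits keys to build(); the path, tag, and limit values are illustrative and a reachable daemon is assumed:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    stream = cli.build(
        path='.',                       # a directory containing a Dockerfile
        tag='example/image:latest',
        pull=True,
        forcerm=True,
        container_limits={'memory': 256 * 1024 * 1024, 'cpushares': 512},
    )
    for chunk in stream:
        print(chunk)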
diff --git a/docker/errors.py b/docker/errors.py
index 749facf..d15e332 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -30,15 +30,15 @@ class APIError(requests.exceptions.HTTPError):
message = super(APIError, self).__str__()
if self.is_client_error():
- message = '%s Client Error: %s' % (
+ message = '{0} Client Error: {1}'.format(
self.response.status_code, self.response.reason)
elif self.is_server_error():
- message = '%s Server Error: %s' % (
+ message = '{0} Server Error: {1}'.format(
self.response.status_code, self.response.reason)
if self.explanation:
- message = '%s ("%s")' % (message, self.explanation)
+ message = '{0} ("{1}")'.format(message, self.explanation)
return message
@@ -53,6 +53,10 @@ class DockerException(Exception):
pass
+class InvalidVersion(DockerException):
+ pass
+
+
class InvalidRepository(DockerException):
pass
@@ -74,3 +78,7 @@ class TLSParameterError(DockerException):
"client configurations. See "
"http://docs.docker.com/examples/https/ for "
"API details.")
+
+
+class NullResource(DockerException, ValueError):
+ pass
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
index e243d07..3a70a91 100644
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -4,20 +4,33 @@
"""
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
+import ssl
+
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
-
PoolManager = urllib3.poolmanager.PoolManager
+def get_max_tls_protocol():
+ protocols = ('PROTOCOL_TLSv1_2',
+ 'PROTOCOL_TLSv1_1',
+ 'PROTOCOL_TLSv1')
+ for proto in protocols:
+ if hasattr(ssl, proto):
+ return getattr(ssl, proto)
+
+
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
- def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
+ def __init__(self, ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None, **kwargs):
+ ssl_version = ssl_version or get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
@@ -26,6 +39,7 @@ class SSLAdapter(HTTPAdapter):
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
+ 'assert_fingerprint': self.assert_fingerprint,
}
if self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
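get_max_tls_protocol() simply probes the interpreter's ssl module for the newest protocol constant it exposes (TLS 1.2, then 1.1, then 1.0), and SSLAdapter now defaults to that value; a quick sketch:

    import ssl
    from docker.ssladapter.ssladapter import get_max_tls_protocol

    proto = get_max_tls_protocol()
    print(proto == getattr(ssl, 'PROTOCOL_TLSv1_2', None))   # True wherever TLS 1.2 is available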
diff --git a/docker/tls.py b/docker/tls.py
index 0e78984..d888b7d 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -10,17 +10,22 @@ class TLSConfig(object):
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
- ssl_version=None, assert_hostname=None):
+ ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None):
# Argument compatibility/mapping with
# http://docs.docker.com/examples/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
- # urllib3 sets a default ssl_version if ssl_version is None
- # http://tinyurl.com/kxga8hb
+ # urllib3 sets a default ssl_version if ssl_version is None,
+ # but that default is the vulnerable PROTOCOL_SSLv23 selection,
+ # so we override the default with the maximum supported in the running
+    # Python interpreter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)
+ ssl_version = ssl_version or ssladapter.get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
@@ -69,4 +74,5 @@ class TLSConfig(object):
client.mount('https://', ssladapter.SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
))
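A configuration sketch for the new assert_fingerprint option; the host, certificate paths, and fingerprint below are placeholders that must point at real values:

    from docker import Client
    from docker.tls import TLSConfig

    tls_config = TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),   # placeholder paths
        assert_hostname=False,
        assert_fingerprint='AA:BB:CC:DD:EE:FF:00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD',
    )
    cli = Client(base_url='https://dockerhost.example.com:2376', tls=tls_config)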
diff --git a/docker/unixconn/unixconn.py b/docker/unixconn/unixconn.py
index 3d3f7bc..551bd29 100644
--- a/docker/unixconn/unixconn.py
+++ b/docker/unixconn/unixconn.py
@@ -12,18 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
+import requests.adapters
+import socket
if six.PY3:
import http.client as httplib
else:
import httplib
-import requests.adapters
-import socket
try:
- import requests.packages.urllib3.connectionpool as connectionpool
+ import requests.packages.urllib3 as urllib3
except ImportError:
- import urllib3.connectionpool as connectionpool
+ import urllib3
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(httplib.HTTPConnection, object):
@@ -36,22 +38,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
- sock.connect(self.base_url.replace("http+unix:/", ""))
+ sock.connect(self.unix_socket)
self.sock = sock
- def _extract_path(self, url):
- # remove the base_url entirely..
- return url.replace(self.base_url, "")
-
- def request(self, method, url, **kwargs):
- url = self._extract_path(self.unix_socket)
- super(UnixHTTPConnection, self).request(method, url, **kwargs)
-
-class UnixHTTPConnectionPool(connectionpool.HTTPConnectionPool):
+class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60):
- connectionpool.HTTPConnectionPool.__init__(self, 'localhost',
- timeout=timeout)
+ urllib3.connectionpool.HTTPConnectionPool.__init__(
+ self, 'localhost', timeout=timeout
+ )
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
@@ -62,10 +57,28 @@ class UnixHTTPConnectionPool(connectionpool.HTTPConnectionPool):
class UnixAdapter(requests.adapters.HTTPAdapter):
- def __init__(self, base_url, timeout=60):
- self.base_url = base_url
+ def __init__(self, socket_url, timeout=60):
+ socket_path = socket_url.replace('http+unix://', '')
+ if not socket_path.startswith('/'):
+ socket_path = '/' + socket_path
+ self.socket_path = socket_path
self.timeout = timeout
+ self.pools = RecentlyUsedContainer(10,
+ dispose_func=lambda p: p.close())
super(UnixAdapter, self).__init__()
- def get_connection(self, socket_path, proxies=None):
- return UnixHTTPConnectionPool(self.base_url, socket_path, self.timeout)
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = UnixHTTPConnectionPool(url,
+ self.socket_path,
+ self.timeout)
+ self.pools[url] = pool
+
+ return pool
+
+ def close(self):
+ self.pools.clear()
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 5d2c1b8..81cc8a6 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,4 +1,9 @@
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
- mkbuildcontext, ping, tar, parse_repository_tag, parse_host
+ mkbuildcontext, tar, parse_repository_tag, parse_host,
+ kwargs_from_env, convert_filters, create_host_config,
+ create_container_config, parse_bytes, ping_registry
) # flake8: noqa
+
+from .types import Ulimit, LogConfig # flake8: noqa
+from .decorators import check_resource #flake8: noqa
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
new file mode 100644
index 0000000..1ed0b6a
--- /dev/null
+++ b/docker/utils/decorators.py
@@ -0,0 +1,16 @@
+from .. import errors
+
+
+def check_resource(f):
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None:
+ if kwargs.get('container'):
+ resource_id = kwargs.pop('container')
+ elif kwargs.get('image'):
+ resource_id = kwargs.pop('image')
+ else:
+ raise errors.NullResource(
+ 'image or container param is undefined'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
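The new guard can be exercised without a daemon, since it fires before any request is made; a short sketch:

    from docker import Client, errors

    cli = Client(base_url='unix://var/run/docker.sock')
    try:
        cli.inspect_container(None)
    except errors.NullResource as e:
        print(e)    # image or container param is undefined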
diff --git a/docker/utils/ports/__init__.py b/docker/utils/ports/__init__.py
new file mode 100644
index 0000000..1dbfa3a
--- /dev/null
+++ b/docker/utils/ports/__init__.py
@@ -0,0 +1,4 @@
+from .ports import (
+ split_port,
+ build_port_bindings
+) # flake8: noqa
diff --git a/docker/utils/ports/ports.py b/docker/utils/ports/ports.py
new file mode 100644
index 0000000..6a0a862
--- /dev/null
+++ b/docker/utils/ports/ports.py
@@ -0,0 +1,84 @@
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def to_port_range(port):
+ if not port:
+ return None
+
+ protocol = ""
+ if "/" in port:
+ parts = port.split("/")
+ if len(parts) != 2:
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]' % port)
+ port, protocol = parts
+ protocol = "/" + protocol
+
+ parts = str(port).split('-')
+
+ if len(parts) == 1:
+ return ["%s%s" % (port, protocol)]
+
+ if len(parts) == 2:
+ full_port_range = range(int(parts[0]), int(parts[1]) + 1)
+ return ["%s%s" % (p, protocol) for p in full_port_range]
+
+ raise ValueError('Invalid port range "%s", should be '
+ 'port or startport-endport' % port)
+
+
+def split_port(port):
+ parts = str(port).split(':')
+ if not 1 <= len(parts) <= 3:
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port:]port[/protocol]' % port)
+
+ if len(parts) == 1:
+ internal_port, = parts
+ return to_port_range(internal_port), None
+ if len(parts) == 2:
+ external_port, internal_port = parts
+
+ internal_range = to_port_range(internal_port)
+ external_range = to_port_range(external_port)
+ if len(internal_range) != len(external_range):
+ raise ValueError('Port ranges don\'t match in length')
+
+ return internal_range, external_range
+
+ external_ip, external_port, internal_port = parts
+ internal_range = to_port_range(internal_port)
+ external_range = to_port_range(external_port)
+ if not external_range:
+ external_range = [None] * len(internal_range)
+
+ if len(internal_range) != len(external_range):
+ raise ValueError('Port ranges don\'t match in length')
+
+ return internal_range, [(external_ip, ex_port or None)
+ for ex_port in external_range]
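A sketch of the new port helpers; the mappings below are arbitrary examples of the '[[remote_ip:]remote_port[-remote_port]:]port[/protocol]' syntax they parse:

    from docker.utils.ports import split_port, build_port_bindings

    print(split_port('8080:80/tcp'))
    # -> (['80/tcp'], ['8080'])
    print(split_port('127.0.0.1:8080-8081:80-81'))
    # -> (['80', '81'], [('127.0.0.1', '8080'), ('127.0.0.1', '8081')])
    print(build_port_bindings(['8080:80', '443']))
    # -> {'80': ['8080'], '443': [None]}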
diff --git a/docker/utils/types.py b/docker/utils/types.py
new file mode 100644
index 0000000..d742fd0
--- /dev/null
+++ b/docker/utils/types.py
@@ -0,0 +1,100 @@
+import six
+
+
+class LogConfigTypesEnum(object):
+ _values = (
+ 'json-file',
+ 'syslog',
+ 'none'
+ )
+ JSON, SYSLOG, NONE = _values
+
+
+class DictType(dict):
+ def __init__(self, init):
+ for k, v in six.iteritems(init):
+ self[k] = v
+
+
+class LogConfig(DictType):
+ types = LogConfigTypesEnum
+
+ def __init__(self, **kwargs):
+ type_ = kwargs.get('type', kwargs.get('Type'))
+ config = kwargs.get('config', kwargs.get('Config'))
+ if type_ not in self.types._values:
+ raise ValueError("LogConfig.type must be one of ({0})".format(
+ ', '.join(self.types._values)
+ ))
+ if config and not isinstance(config, dict):
+ raise ValueError("LogConfig.config must be a dictionary")
+
+ super(LogConfig, self).__init__({
+ 'Type': type_,
+ 'Config': config or {}
+ })
+
+ @property
+ def type(self):
+ return self['Type']
+
+ @type.setter
+ def type(self, value):
+ if value not in self.types._values:
+ raise ValueError("LogConfig.type must be one of {0}".format(
+ ', '.join(self.types._values)
+ ))
+ self['Type'] = value
+
+ @property
+ def config(self):
+ return self['Config']
+
+ def set_config_value(self, key, value):
+ self.config[key] = value
+
+ def unset_config(self, key):
+ if key in self.config:
+ del self.config[key]
+
+
+class Ulimit(DictType):
+ def __init__(self, **kwargs):
+ name = kwargs.get('name', kwargs.get('Name'))
+ soft = kwargs.get('soft', kwargs.get('Soft'))
+ hard = kwargs.get('hard', kwargs.get('Hard'))
+ if not isinstance(name, six.string_types):
+ raise ValueError("Ulimit.name must be a string")
+ if soft and not isinstance(soft, int):
+ raise ValueError("Ulimit.soft must be an integer")
+ if hard and not isinstance(hard, int):
+ raise ValueError("Ulimit.hard must be an integer")
+ super(Ulimit, self).__init__({
+ 'Name': name,
+ 'Soft': soft,
+ 'Hard': hard
+ })
+
+ @property
+ def name(self):
+ return self['Name']
+
+ @name.setter
+ def name(self, value):
+ self['Name'] = value
+
+ @property
+ def soft(self):
+ return self.get('Soft')
+
+ @soft.setter
+ def soft(self, value):
+ self['Soft'] = value
+
+ @property
+ def hard(self):
+ return self.get('Hard')
+
+ @hard.setter
+ def hard(self, value):
+ self['Hard'] = value
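A short sketch of the two new dict-backed helper types; the syslog address is only an illustrative value:

    from docker.utils import Ulimit, LogConfig

    nofile = Ulimit(name='nofile', soft=1024, hard=2048)
    print(nofile)                 # {'Name': 'nofile', 'Soft': 1024, 'Hard': 2048}

    log_cfg = LogConfig(type=LogConfig.types.SYSLOG)
    log_cfg.set_config_value('syslog-address', 'tcp://127.0.0.1:514')
    print(log_cfg.type, log_cfg.config)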
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index fb05a1e..a18939d 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -14,18 +14,31 @@
import io
import os
+import os.path
+import json
+import shlex
import tarfile
import tempfile
from distutils.version import StrictVersion
from fnmatch import fnmatch
+from datetime import datetime
import requests
import six
from .. import errors
+from .. import tls
+from .types import Ulimit, LogConfig
+
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
def mkbuildcontext(dockerfile):
@@ -38,9 +51,11 @@ def mkbuildcontext(dockerfile):
'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
@@ -69,9 +84,14 @@ def tar(path, exclude=None):
fnames = [name for name in filenames
if not fnmatch_any(os.path.join(relpath, name),
exclude)]
- for name in fnames:
+ dirnames.sort()
+ for name in sorted(fnames):
arcname = os.path.join(relpath, name)
t.add(os.path.join(path, arcname), arcname=arcname)
+ for name in dirnames:
+ arcname = os.path.join(relpath, name)
+ t.add(os.path.join(path, arcname),
+ arcname=arcname, recursive=False)
t.close()
f.seek(0)
return f
@@ -99,6 +119,10 @@ def compare_version(v1, v2):
return 1
+def ping_registry(url):
+ return ping(url + '/v2/') or ping(url + '/v1/_ping')
+
+
def ping(url):
try:
res = requests.get(url, timeout=3)
@@ -153,11 +177,11 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
if isinstance(v, dict):
- result.append('%s:%s:%s' % (
+ result.append('{0}:{1}:{2}'.format(
k, v['bind'], 'ro' if v.get('ro', False) else 'rw'
))
else:
- result.append('%s:%s:rw' % (k, v))
+ result.append('{0}:{1}:rw'.format(k, v))
return result
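
As a quick illustration of the two bind formats convert_volume_binds() accepts
(paths are placeholders; result ordering follows dict iteration order):

    convert_volume_binds({
        '/srv/data': '/data',                            # bare string -> rw
        '/srv/conf': {'bind': '/etc/app', 'ro': True},   # dict form   -> ro
    })
    # e.g. ['/srv/data:/data:rw', '/srv/conf:/etc/app:ro']
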
@@ -191,7 +215,8 @@ def parse_host(addr):
addr = addr.replace('http+unix://', 'unix://')
if addr == 'tcp://':
- raise errors.DockerException("Invalid bind address format: %s" % addr)
+ raise errors.DockerException(
+ "Invalid bind address format: {0}".format(addr))
elif addr.startswith('unix://'):
addr = addr[7:]
elif addr.startswith('tcp://'):
@@ -205,7 +230,7 @@ def parse_host(addr):
else:
if "://" in addr:
raise errors.DockerException(
- "Invalid bind address protocol: %s" % addr
+ "Invalid bind address protocol: {0}".format(addr)
)
proto = "http"
@@ -213,7 +238,7 @@ def parse_host(addr):
host_parts = addr.split(':')
if len(host_parts) != 2:
raise errors.DockerException(
- "Invalid bind address format: %s" % addr
+ "Invalid bind address format: {0}".format(addr)
)
if host_parts[0]:
host = host_parts[0]
@@ -226,10 +251,335 @@ def parse_host(addr):
)
elif proto in ("http", "https") and ':' not in addr:
- raise errors.DockerException("Bind address needs a port: %s" % addr)
+ raise errors.DockerException(
+ "Bind address needs a port: {0}".format(addr))
else:
host = addr
if proto == "http+unix":
- return "%s://%s" % (proto, host)
- return "%s://%s:%d" % (proto, host, port)
+ return "{0}://{1}".format(proto, host)
+ return "{0}://{1}:{2}".format(proto, host, port)
+
+
+def parse_devices(devices):
+ device_list = []
+ for device in devices:
+ device_mapping = device.split(":")
+ if device_mapping:
+ path_on_host = device_mapping[0]
+ if len(device_mapping) > 1:
+ path_in_container = device_mapping[1]
+ else:
+ path_in_container = path_on_host
+ if len(device_mapping) > 2:
+ permissions = device_mapping[2]
+ else:
+ permissions = 'rwm'
+ device_list.append({"PathOnHost": path_on_host,
+ "PathInContainer": path_in_container,
+ "CgroupPermissions": permissions})
+ return device_list
+
+
+def kwargs_from_env(ssl_version=None, assert_hostname=None):
+ host = os.environ.get('DOCKER_HOST')
+ cert_path = os.environ.get('DOCKER_CERT_PATH')
+ tls_verify = os.environ.get('DOCKER_TLS_VERIFY')
+
+ params = {}
+ if host:
+ params['base_url'] = (host.replace('tcp://', 'https://')
+ if tls_verify else host)
+ if tls_verify and cert_path:
+ params['tls'] = tls.TLSConfig(
+ client_cert=(os.path.join(cert_path, 'cert.pem'),
+ os.path.join(cert_path, 'key.pem')),
+ ca_cert=os.path.join(cert_path, 'ca.pem'),
+ verify=True,
+ ssl_version=ssl_version,
+ assert_hostname=assert_hostname)
+ return params
+
+
+def convert_filters(filters):
+ result = {}
+ for k, v in six.iteritems(filters):
+ if isinstance(v, bool):
+ v = 'true' if v else 'false'
+ if not isinstance(v, list):
+ v = [v, ]
+ result[k] = v
+ return json.dumps(result)
+
+
+def datetime_to_timestamp(dt=None):
+    """Convert a datetime in local timezone to a unix timestamp"""
+    # Resolve the default at call time rather than once at import time.
+    if dt is None:
+        dt = datetime.now()
+    delta = dt - datetime.fromtimestamp(0)
+    return delta.seconds + delta.days * 24 * 3600
+
+
+def parse_bytes(s):
+ if len(s) == 0:
+ s = 0
+ else:
+ units = BYTE_UNITS
+ suffix = s[-1].lower()
+
+ # Check if the variable is a string representation of an int
+ # without a units part. Assuming that the units are bytes.
+ if suffix.isdigit():
+ digits_part = s
+ suffix = 'b'
+ else:
+ digits_part = s[:-1]
+
+ if suffix in units.keys() or suffix.isdigit():
+ try:
+ digits = int(digits_part)
+ except ValueError:
+                message = ('Failed converting the string value for'
+                           ' memory ({0}) to a number.')
+ formatted_message = message.format(digits_part)
+ raise errors.DockerException(formatted_message)
+
+ s = digits * units[suffix]
+ else:
+ message = ('The specified value for memory'
+ ' ({0}) should specify the units. The postfix'
+ ' should be one of the `b` `k` `m` `g`'
+ ' characters')
+ raise errors.DockerException(message.format(s))
+
+ return s
+
+
+def create_host_config(
+ binds=None, port_bindings=None, lxc_conf=None,
+ publish_all_ports=False, links=None, privileged=False,
+ dns=None, dns_search=None, volumes_from=None, network_mode=None,
+ restart_policy=None, cap_add=None, cap_drop=None, devices=None,
+ extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None
+):
+ host_config = {}
+
+ if pid_mode not in (None, 'host'):
+ raise errors.DockerException(
+ 'Invalid value for pid param: {0}'.format(pid_mode)
+ )
+ elif pid_mode:
+ host_config['PidMode'] = pid_mode
+
+ if ipc_mode:
+ host_config['IpcMode'] = ipc_mode
+
+ if privileged:
+ host_config['Privileged'] = privileged
+
+ if publish_all_ports:
+ host_config['PublishAllPorts'] = publish_all_ports
+
+ if read_only is not None:
+        host_config['ReadonlyRootfs'] = read_only
+
+ if dns_search:
+ host_config['DnsSearch'] = dns_search
+
+ if network_mode:
+ host_config['NetworkMode'] = network_mode
+
+ if restart_policy:
+ host_config['RestartPolicy'] = restart_policy
+
+ if cap_add:
+ host_config['CapAdd'] = cap_add
+
+ if cap_drop:
+ host_config['CapDrop'] = cap_drop
+
+ if devices:
+ host_config['Devices'] = parse_devices(devices)
+
+ if dns is not None:
+ host_config['Dns'] = dns
+
+ if security_opt is not None:
+ if not isinstance(security_opt, list):
+ raise errors.DockerException(
+ 'Invalid type for security_opt param: expected list but found'
+ ' {0}'.format(type(security_opt))
+ )
+ host_config['SecurityOpt'] = security_opt
+
+ if volumes_from is not None:
+ if isinstance(volumes_from, six.string_types):
+ volumes_from = volumes_from.split(',')
+ host_config['VolumesFrom'] = volumes_from
+
+ if binds is not None:
+ host_config['Binds'] = convert_volume_binds(binds)
+
+ if port_bindings is not None:
+ host_config['PortBindings'] = convert_port_bindings(
+ port_bindings
+ )
+
+ if extra_hosts is not None:
+ if isinstance(extra_hosts, dict):
+ extra_hosts = [
+ '{0}:{1}'.format(k, v)
+ for k, v in sorted(six.iteritems(extra_hosts))
+ ]
+
+ host_config['ExtraHosts'] = extra_hosts
+
+ if links is not None:
+ if isinstance(links, dict):
+ links = six.iteritems(links)
+
+ formatted_links = [
+ '{0}:{1}'.format(k, v) for k, v in sorted(links)
+ ]
+
+ host_config['Links'] = formatted_links
+
+ if isinstance(lxc_conf, dict):
+ formatted = []
+ for k, v in six.iteritems(lxc_conf):
+ formatted.append({'Key': k, 'Value': str(v)})
+ lxc_conf = formatted
+
+ if lxc_conf is not None:
+ host_config['LxcConf'] = lxc_conf
+
+ if ulimits is not None:
+ if not isinstance(ulimits, list):
+ raise errors.DockerException(
+ 'Invalid type for ulimits param: expected list but found'
+ ' {0}'.format(type(ulimits))
+ )
+ host_config['Ulimits'] = []
+ for l in ulimits:
+ if not isinstance(l, Ulimit):
+ l = Ulimit(**l)
+ host_config['Ulimits'].append(l)
+
+ if log_config is not None:
+ if not isinstance(log_config, LogConfig):
+ if not isinstance(log_config, dict):
+ raise errors.DockerException(
+ 'Invalid type for log_config param: expected LogConfig but'
+ ' found {0}'.format(type(log_config))
+ )
+ log_config = LogConfig(**log_config)
+ host_config['LogConfig'] = log_config
+
+ return host_config
+
+
+def create_container_config(
+ version, image, command, hostname=None, user=None, detach=False,
+ stdin_open=False, tty=False, mem_limit=0, ports=None, environment=None,
+ dns=None, volumes=None, volumes_from=None, network_disabled=False,
+ entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
+ memswap_limit=0, cpuset=None, host_config=None, mac_address=None,
+ labels=None
+):
+ if isinstance(command, six.string_types):
+ command = shlex.split(str(command))
+ if isinstance(environment, dict):
+ environment = [
+ six.text_type('{0}={1}').format(k, v)
+ for k, v in six.iteritems(environment)
+ ]
+
+ if labels is not None and compare_version('1.18', version) < 0:
+ raise errors.DockerException(
+ 'labels were only introduced in API version 1.18'
+ )
+
+ if isinstance(labels, list):
+ labels = dict((lbl, six.text_type('')) for lbl in labels)
+
+ if isinstance(mem_limit, six.string_types):
+ mem_limit = parse_bytes(mem_limit)
+ if isinstance(memswap_limit, six.string_types):
+ memswap_limit = parse_bytes(memswap_limit)
+
+ if isinstance(ports, list):
+ exposed_ports = {}
+ for port_definition in ports:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ ports = exposed_ports
+
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
+ if isinstance(volumes, list):
+ volumes_dict = {}
+ for vol in volumes:
+ volumes_dict[vol] = {}
+ volumes = volumes_dict
+
+ if volumes_from:
+ if not isinstance(volumes_from, six.string_types):
+ volumes_from = ','.join(volumes_from)
+ else:
+ # Force None, an empty list or dict causes client.start to fail
+ volumes_from = None
+
+ attach_stdin = False
+ attach_stdout = False
+ attach_stderr = False
+ stdin_once = False
+
+ if not detach:
+ attach_stdout = True
+ attach_stderr = True
+
+ if stdin_open:
+ attach_stdin = True
+ stdin_once = True
+
+ if compare_version('1.10', version) >= 0:
+ message = ('{0!r} parameter has no effect on create_container().'
+ ' It has been moved to start()')
+ if dns is not None:
+ raise errors.DockerException(message.format('dns'))
+ if volumes_from is not None:
+ raise errors.DockerException(message.format('volumes_from'))
+
+ return {
+ 'Hostname': hostname,
+ 'Domainname': domainname,
+ 'ExposedPorts': ports,
+ 'User': user,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'Memory': mem_limit,
+ 'AttachStdin': attach_stdin,
+ 'AttachStdout': attach_stdout,
+ 'AttachStderr': attach_stderr,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Dns': dns,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'VolumesFrom': volumes_from,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'CpuShares': cpu_shares,
+ 'Cpuset': cpuset,
+ 'WorkingDir': working_dir,
+ 'MemorySwap': memswap_limit,
+ 'HostConfig': host_config,
+ 'MacAddress': mac_address,
+ 'Labels': labels,
+ }
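
A minimal end-to-end sketch of the helpers added above (illustrative only, not
part of the upstream change; it assumes kwargs_from_env() and
create_host_config() are re-exported through docker.utils, and the image name
and the create_container() call are placeholders for the client API, which is
not shown in this part of the diff):

    import docker
    from docker.utils import kwargs_from_env, create_host_config

    client = docker.Client(**kwargs_from_env(assert_hostname=False))
    host_config = create_host_config(
        binds={'/srv/web': {'bind': '/usr/share/nginx/html', 'ro': True}},
        port_bindings={80: 8080},
        ulimits=[{'name': 'nofile', 'soft': 1024, 'hard': 2048}],
        log_config={'type': 'json-file'},
    )
    container = client.create_container(image='nginx', host_config=host_config)
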
diff --git a/docker/version.py b/docker/version.py
index f4c4f3f..a57056b 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1 +1,2 @@
-version = "0.5.3"
+version = "1.2.2"
+version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
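
The new version_info tuple makes programmatic version checks straightforward,
for example:

    from docker.version import version, version_info

    assert version_info == (1, 2, 2)
    if version_info >= (1, 2):
        print('running docker-py {0}'.format(version))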