path: root/docker
author     SVN-Git Migration <python-modules-team@lists.alioth.debian.org>   2015-10-08 11:55:16 -0700
committer  SVN-Git Migration <python-modules-team@lists.alioth.debian.org>   2015-10-08 11:55:16 -0700
commit     42969e566b350dcd593ec27d29fdc59e0ffebecf (patch)
tree       cc8eaafe39c185b64e01ca188a729360a55c3660 /docker
parent     9dc217112a6ea6cabd11edd1e72c96b52946b680 (diff)
Imported Upstream version 0.3.2
Diffstat (limited to 'docker')
-rw-r--r--   docker/__init__.py             7
-rw-r--r--   docker/auth/auth.py          128
-rw-r--r--   docker/client.py             583
-rw-r--r--   docker/errors.py              65
-rw-r--r--   docker/unixconn/unixconn.py    4
-rw-r--r--   docker/utils/__init__.py       3
-rw-r--r--   docker/utils/utils.py         55
-rw-r--r--   docker/version.py              1
8 files changed, 593 insertions, 253 deletions
diff --git a/docker/__init__.py b/docker/__init__.py
index 5f642a8..343766d 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -12,4 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from .client import Client, APIError # flake8: noqa
+from .version import version
+
+__version__ = version
+__title__ = 'docker-py'
+
+from .client import Client # flake8: noqa
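
Usage sketch (not part of the commit): with this change the package version becomes a plain attribute, and APIError is no longer re-exported from the top-level package (it moves to docker/errors.py, shown below).

    import docker

    print(docker.__version__)   # "0.3.2"
    print(docker.__title__)     # "docker-py"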
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index 0e32477..0bd386d 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -13,14 +13,17 @@
# limitations under the License.
import base64
+import fileinput
import json
import os
import six
-import docker.utils as utils
+from ..utils import utils
+from .. import errors
INDEX_URL = 'https://index.docker.io/v1/'
+DOCKER_CONFIG_FILENAME = '.dockercfg'
def swap_protocol(url):
@@ -43,28 +46,32 @@ def expand_registry_url(hostname):
def resolve_repository_name(repo_name):
if '://' in repo_name:
- raise ValueError('Repository name cannot contain a '
- 'scheme ({0})'.format(repo_name))
+ raise errors.InvalidRepository(
+ 'Repository name cannot contain a scheme ({0})'.format(repo_name))
parts = repo_name.split('/', 1)
- if not '.' in parts[0] and not ':' in parts[0] and parts[0] != 'localhost':
+ if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
# This is a docker index repo (ex: foo/bar or ubuntu)
return INDEX_URL, repo_name
if len(parts) < 2:
- raise ValueError('Invalid repository name ({0})'.format(repo_name))
+ raise errors.InvalidRepository(
+ 'Invalid repository name ({0})'.format(repo_name))
if 'index.docker.io' in parts[0]:
- raise ValueError('Invalid repository name,'
- 'try "{0}" instead'.format(parts[1]))
+ raise errors.InvalidRepository(
+ 'Invalid repository name, try "{0}" instead'.format(parts[1]))
return expand_registry_url(parts[0]), parts[1]
-def resolve_authconfig(authconfig, registry):
- default = {}
- if registry == INDEX_URL or registry == '':
- # default to the index server
- return authconfig['Configs'].get(INDEX_URL, default)
- # if its not the index server there are three cases:
+def resolve_authconfig(authconfig, registry=None):
+ """Return the authentication data from the given auth configuration for a
+ specific registry. We'll do our best to infer the correct URL for the
+ registry, trying both http and https schemes. Returns an empty dictionary
+ if no data exists."""
+ # Default to the public index server
+ registry = registry or INDEX_URL
+
+ # If it's not the index server there are three cases:
#
# 1. this is a full config url -> it should be used as is
# 2. it could be a full url, but with the wrong protocol
@@ -77,11 +84,14 @@ def resolve_authconfig(authconfig, registry):
if not registry.startswith('http:') and not registry.startswith('https:'):
registry = 'https://' + registry
- if registry in authconfig['Configs']:
- return authconfig['Configs'][registry]
- elif swap_protocol(registry) in authconfig['Configs']:
- return authconfig['Configs'][swap_protocol(registry)]
- return default
+ if registry in authconfig:
+ return authconfig[registry]
+ return authconfig.get(swap_protocol(registry), None)
+
+
+def encode_auth(auth_info):
+ return base64.b64encode(auth_info.get('username', '') + b':' +
+ auth_info.get('password', ''))
def decode_auth(auth):
@@ -97,39 +107,61 @@ def encode_header(auth):
return base64.b64encode(auth_json)
+def encode_full_header(auth):
+ """ Returns the given auth block encoded for the X-Registry-Config header.
+ """
+ return encode_header({'configs': auth})
+
+
def load_config(root=None):
- root = root or os.environ['HOME']
- config = {
- 'Configs': {},
- 'rootPath': root
- }
+ """Loads authentication data from a Docker configuration file in the given
+ root directory."""
+ conf = {}
+ data = None
- config_file = os.path.join(root, '.dockercfg')
- if not os.path.exists(config_file):
- return config
+ config_file = os.path.join(root or os.environ.get('HOME', '.'),
+ DOCKER_CONFIG_FILENAME)
- f = open(config_file)
+ # First try as JSON
+ try:
+ with open(config_file) as f:
+ conf = {}
+ for registry, entry in six.iteritems(json.load(f)):
+ username, password = decode_auth(entry['auth'])
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry['email'],
+ 'serveraddress': registry,
+ }
+ return conf
+ except:
+ pass
+
+ # If that fails, we assume the configuration file contains a single
+ # authentication token for the public registry in the following format:
+ #
+ # auth = AUTH_TOKEN
+ # email = email@domain.com
try:
- config['Configs'] = json.load(f)
- for k, conf in six.iteritems(config['Configs']):
- conf['Username'], conf['Password'] = decode_auth(conf['auth'])
- del conf['auth']
- config['Configs'][k] = conf
- except Exception:
- f.seek(0)
- buf = []
- for line in f:
- k, v = line.split(' = ')
- buf.append(v)
- if len(buf) < 2:
- raise Exception("The Auth config file is empty")
- user, pwd = decode_auth(buf[0])
- config['Configs'][INDEX_URL] = {
- 'Username': user,
- 'Password': pwd,
- 'Email': buf[1]
+ data = []
+ for line in fileinput.input(config_file):
+ data.append(line.strip().split(' = ')[1])
+ if len(data) < 2:
+ # Not enough data
+ raise errors.InvalidConfigFile(
+ 'Invalid or empty configuration file!')
+
+ username, password = decode_auth(data[0])
+ conf[INDEX_URL] = {
+ 'username': username,
+ 'password': password,
+ 'email': data[1],
+ 'serveraddress': INDEX_URL,
}
- finally:
- f.close()
+ return conf
+ except:
+ pass
- return config
+ # If all fails, return an empty config
+ return {}
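
Usage sketch (not part of the commit) of the reworked auth helpers; the repository names below are placeholders. load_config() now returns a flat mapping keyed by registry URL, with lowercase 'username'/'password'/'email'/'serveraddress' fields, instead of the old {'Configs': {...}} wrapper, and invalid repository names raise typed exceptions from docker.errors:

    from docker.auth import auth
    from docker import errors

    # Scheme-qualified names are now rejected with a typed exception.
    try:
        auth.resolve_repository_name('https://example.com/foo')
    except errors.InvalidRepository as e:
        print(e)

    registry, repo = auth.resolve_repository_name('ubuntu')  # (INDEX_URL, 'ubuntu')

    conf = auth.load_config()               # reads ~/.dockercfg if present, else {}
    authcfg = auth.resolve_authconfig(conf, registry)
    if authcfg:
        print(authcfg['username'])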
diff --git a/docker/client.py b/docker/client.py
index 90f430c..a1c8ed2 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -16,68 +16,45 @@ import json
import re
import shlex
import struct
+import warnings
import requests
import requests.exceptions
import six
-import docker.auth as auth
-import docker.unixconn as unixconn
-import docker.utils as utils
+from .auth import auth
+from .unixconn import unixconn
+from .utils import utils
+from . import errors
if not six.PY3:
import websocket
+DEFAULT_DOCKER_API_VERSION = '1.12'
DEFAULT_TIMEOUT_SECONDS = 60
-
-
-class APIError(requests.exceptions.HTTPError):
- def __init__(self, message, response, explanation=None):
- super(APIError, self).__init__(message, response=response)
-
- self.explanation = explanation
-
- if self.explanation is None and response.content:
- self.explanation = response.content.strip()
-
- def __str__(self):
- message = super(APIError, self).__str__()
-
- if self.is_client_error():
- message = '%s Client Error: %s' % (
- self.response.status_code, self.response.reason)
-
- elif self.is_server_error():
- message = '%s Server Error: %s' % (
- self.response.status_code, self.response.reason)
-
- if self.explanation:
- message = '%s ("%s")' % (message, self.explanation)
-
- return message
-
- def is_client_error(self):
- return 400 <= self.response.status_code < 500
-
- def is_server_error(self):
- return 500 <= self.response.status_code < 600
+STREAM_HEADER_SIZE_BYTES = 8
class Client(requests.Session):
- def __init__(self, base_url="unix://var/run/docker.sock", version="1.6",
+ def __init__(self, base_url=None, version=DEFAULT_DOCKER_API_VERSION,
timeout=DEFAULT_TIMEOUT_SECONDS):
super(Client, self).__init__()
- if base_url.startswith('unix:///'):
+ if base_url is None:
+ base_url = "http+unix://var/run/docker.sock"
+ if 'unix:///' in base_url:
base_url = base_url.replace('unix:/', 'unix:')
+ if base_url.startswith('unix:'):
+ base_url = "http+" + base_url
+ if base_url.startswith('tcp:'):
+ base_url = base_url.replace('tcp:', 'http:')
+ if base_url.endswith('/'):
+ base_url = base_url[:-1]
self.base_url = base_url
self._version = version
self._timeout = timeout
+ self._auth_configs = auth.load_config()
- self.mount('unix://', unixconn.UnixAdapter(base_url, timeout))
- try:
- self._cfg = auth.load_config()
- except Exception:
- pass
+ self.mount('http+unix://', unixconn.UnixAdapter(base_url, timeout))
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
@@ -102,30 +79,25 @@ class Client(requests.Session):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
- raise APIError(e, response, explanation=explanation)
-
- def _stream_result(self, response):
- self._raise_for_status(response)
- for line in response.iter_lines(chunk_size=1):
- # filter out keep-alive new lines
- if line:
- yield line + '\n'
-
- def _stream_result_socket(self, response):
- self._raise_for_status(response)
- return response.raw._fp.fp._sock
+ raise errors.APIError(e, response, explanation=explanation)
- def _result(self, response, json=False):
+ def _result(self, response, json=False, binary=False):
+ assert not (json and binary)
self._raise_for_status(response)
if json:
return response.json()
+ if binary:
+ return response.content
return response.text
def _container_config(self, image, command, hostname=None, user=None,
detach=False, stdin_open=False, tty=False,
mem_limit=0, ports=None, environment=None, dns=None,
- volumes=None, volumes_from=None, privileged=False):
+ volumes=None, volumes_from=None,
+ network_disabled=False, entrypoint=None,
+ cpu_shares=None, working_dir=None, domainname=None,
+ memswap_limit=0):
if isinstance(command, six.string_types):
command = shlex.split(str(command))
if isinstance(environment, dict):
@@ -133,30 +105,35 @@ class Client(requests.Session):
'{0}={1}'.format(k, v) for k, v in environment.items()
]
- if ports and isinstance(ports, list):
+ if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
- proto = None
+ proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
- exposed_ports['{0}{1}'.format(
- port,
- '/' + proto if proto else ''
- )] = {}
+ exposed_ports['{0}/{1}'.format(port, proto)] = {}
ports = exposed_ports
- if volumes and isinstance(volumes, list):
+ if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
+ if volumes_from:
+ if not isinstance(volumes_from, six.string_types):
+ volumes_from = ','.join(volumes_from)
+ else:
+ # Force None, an empty list or dict causes client.start to fail
+ volumes_from = None
+
attach_stdin = False
attach_stdout = False
attach_stderr = False
+ stdin_once = False
if not detach:
attach_stdout = True
@@ -164,24 +141,39 @@ class Client(requests.Session):
if stdin_open:
attach_stdin = True
+ stdin_once = True
+
+ if utils.compare_version('1.10', self._version) >= 0:
+ message = ('{0!r} parameter has no effect on create_container().'
+ ' It has been moved to start()')
+ if dns is not None:
+ raise errors.DockerException(message.format('dns'))
+ if volumes_from is not None:
+ raise errors.DockerException(message.format('volumes_from'))
return {
- 'Hostname': hostname,
+ 'Hostname': hostname,
+ 'Domainname': domainname,
'ExposedPorts': ports,
- 'User': user,
- 'Tty': tty,
- 'OpenStdin': stdin_open,
- 'Memory': mem_limit,
- 'AttachStdin': attach_stdin,
+ 'User': user,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'Memory': mem_limit,
+ 'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
- 'Env': environment,
- 'Cmd': command,
- 'Dns': dns,
- 'Image': image,
- 'Volumes': volumes,
- 'VolumesFrom': volumes_from,
- 'Privileged': privileged,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Dns': dns,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'VolumesFrom': volumes_from,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'CpuShares': cpu_shares,
+ 'WorkingDir': working_dir,
+ 'MemorySwap': memswap_limit
}
def _post_json(self, url, data, **kwargs):
@@ -219,10 +211,26 @@ class Client(requests.Session):
def _create_websocket_connection(self, url):
return websocket.create_connection(url)
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if six.PY3:
+ return response.raw._fp.fp.raw._sock
+ else:
+ return response.raw._fp.fp._sock
+
def _stream_helper(self, response):
- socket = self._stream_result_socket(response).makefile()
+ """Generator for data coming from a chunked-encoded HTTP response."""
+ socket_fp = self._get_raw_response_socket(response)
+ socket_fp.setblocking(1)
+ socket = socket_fp.makefile()
while True:
- size = int(socket.readline(), 16)
+ # Because Docker introduced newlines at the end of chunks in v0.9,
+ # and only on some API endpoints, we have to cater for both cases.
+ size_line = socket.readline()
+ if size_line == '\r\n':
+ size_line = socket.readline()
+
+ size = int(size_line, 16)
if size <= 0:
break
data = socket.readline()
@@ -230,15 +238,83 @@ class Client(requests.Session):
break
yield data
- def attach(self, container):
- socket = self.attach_socket(container)
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ walker = 0
+ while True:
+ if len(buf[walker:]) < 8:
+ break
+ _, length = struct.unpack_from('>BxxxL', buf[walker:])
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_socket_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ socket."""
+ socket = self._get_raw_response_socket(response)
+
+ def recvall(socket, size):
+ blocks = []
+ while size > 0:
+ block = socket.recv(size)
+ if not block:
+ return None
+
+ blocks.append(block)
+ size -= len(block)
+
+ sep = bytes() if six.PY3 else str()
+ data = sep.join(blocks)
+ return data
while True:
- chunk = socket.recv(4096)
- if chunk:
- yield chunk
- else:
+ socket.settimeout(None)
+ header = recvall(socket, STREAM_HEADER_SIZE_BYTES)
+ if not header:
break
+ _, length = struct.unpack('>BxxxL', header)
+ if not length:
+ break
+ data = recvall(socket, length)
+ if not data:
+ break
+ yield data
+
+ def attach(self, container, stdout=True, stderr=True,
+ stream=False, logs=False):
+ if isinstance(container, dict):
+ container = container.get('Id')
+ params = {
+ 'logs': logs and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'stderr': stderr and 1 or 0,
+ 'stream': stream and 1 or 0,
+ }
+ u = self._url("/containers/{0}/attach".format(container))
+ response = self._post(u, params=params, stream=stream)
+
+ # Stream multiplexing was only introduced in API v1.6. Anything before
+ # that needs old-style streaming.
+ if utils.compare_version('1.6', self._version) < 0:
+ def stream_result():
+ self._raise_for_status(response)
+ for line in response.iter_lines(chunk_size=1,
+ decode_unicode=True):
+ # filter out keep-alive new lines
+ if line:
+ yield line
+
+ return stream_result() if stream else \
+ self._result(response, binary=True)
+
+ sep = bytes() if six.PY3 else str()
+
+ return stream and self._multiplexed_socket_stream_helper(response) or \
+ sep.join([x for x in self._multiplexed_buffer_helper(response)])
def attach_socket(self, container, params=None, ws=False):
if params is None:
@@ -247,28 +323,39 @@ class Client(requests.Session):
'stderr': 1,
'stream': 1
}
+
if ws:
return self._attach_websocket(container, params)
if isinstance(container, dict):
container = container.get('Id')
+
u = self._url("/containers/{0}/attach".format(container))
- return self._stream_result_socket(self.post(
+ return self._get_raw_response_socket(self.post(
u, None, params=self._attach_params(params), stream=True))
def build(self, path=None, tag=None, quiet=False, fileobj=None,
- nocache=False, rm=False, stream=False):
+ nocache=False, rm=False, stream=False, timeout=None,
+ custom_context=False, encoding=None):
remote = context = headers = None
if path is None and fileobj is None:
- raise Exception("Either path or fileobj needs to be provided.")
+ raise TypeError("Either path or fileobj needs to be provided.")
- if fileobj is not None:
+ if custom_context:
+ if not fileobj:
+ raise TypeError("You must specify fileobj with custom_context")
+ context = fileobj
+ elif fileobj is not None:
context = utils.mkbuildcontext(fileobj)
- elif path.startswith(('http://', 'https://', 'git://', 'github.com/')):
+ elif path.startswith(('http://', 'https://',
+ 'git://', 'github.com/')):
remote = path
else:
context = utils.tar(path)
+ if utils.compare_version('1.8', self._version) >= 0:
+ stream = True
+
u = self._url('/build')
params = {
't': tag,
@@ -277,17 +364,39 @@ class Client(requests.Session):
'nocache': nocache,
'rm': rm
}
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
+ if encoding:
+ headers['Content-Encoding'] = encoding
+
+ if utils.compare_version('1.9', self._version) >= 0:
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs:
+ self._auth_configs = auth.load_config()
+
+ # Send the full auth configuration (if any exists), since the build
+ # could use any (or all) of the registries.
+ if self._auth_configs:
+ headers['X-Registry-Config'] = auth.encode_full_header(
+ self._auth_configs
+ )
response = self._post(
- u, data=context, params=params, headers=headers, stream=stream
+ u,
+ data=context,
+ params=params,
+ headers=headers,
+ stream=stream,
+ timeout=timeout,
)
if context is not None:
context.close()
+
if stream:
- return self._stream_result(response)
+ return self._stream_helper(response)
else:
output = self._result(response)
srch = r'Successfully built ([0-9a-f]+)'
@@ -310,10 +419,11 @@ class Client(requests.Session):
json=True)
def containers(self, quiet=False, all=False, trunc=True, latest=False,
- since=None, before=None, limit=-1):
+ since=None, before=None, limit=-1, size=False):
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
+ 'size': 1 if size else 0,
'trunc_cmd': 1 if trunc else 0,
'since': since,
'before': before
@@ -326,6 +436,8 @@ class Client(requests.Session):
return res
def copy(self, container, resource):
+ if isinstance(container, dict):
+ container = container.get('Id')
res = self._post_json(
self._url("/containers/{0}/copy".format(container)),
data={"Resource": resource},
@@ -337,12 +449,15 @@ class Client(requests.Session):
def create_container(self, image, command=None, hostname=None, user=None,
detach=False, stdin_open=False, tty=False,
mem_limit=0, ports=None, environment=None, dns=None,
- volumes=None, volumes_from=None, privileged=False,
- name=None):
+ volumes=None, volumes_from=None,
+ network_disabled=False, name=None, entrypoint=None,
+ cpu_shares=None, working_dir=None, domainname=None,
+ memswap_limit=0):
config = self._container_config(
image, command, hostname, user, detach, stdin_open, tty, mem_limit,
- ports, environment, dns, volumes, volumes_from, privileged
+ ports, environment, dns, volumes, volumes_from, network_disabled,
+ entrypoint, cpu_shares, working_dir, domainname, memswap_limit
)
return self.create_container_from_config(config, name)
@@ -361,21 +476,7 @@ class Client(requests.Session):
format(container))), True)
def events(self):
- u = self._url("/events")
-
- socket = self._stream_result_socket(self.get(u, stream=True))
-
- while True:
- chunk = socket.recv(4096)
- if chunk:
- # Messages come in the format of length, data, newline.
- length, data = chunk.split("\n", 1)
- length = int(length, 16)
- if length > len(data):
- data += socket.recv(length - len(data))
- yield json.loads(data)
- else:
- break
+ return self._stream_helper(self.get(self._url('/events'), stream=True))
def export(self, container):
if isinstance(container, dict):
@@ -385,6 +486,12 @@ class Client(requests.Session):
self._raise_for_status(res)
return res.raw
+ def get_image(self, image):
+ res = self._get(self._url("/images/{0}/get".format(image)),
+ stream=True)
+ self._raise_for_status(res)
+ return res.raw
+
def history(self, image):
res = self._get(self._url("/images/{0}/history".format(image)))
self._raise_for_status(res)
@@ -392,6 +499,8 @@ class Client(requests.Session):
def images(self, name=None, quiet=False, all=False, viz=False):
if viz:
+ if utils.compare_version('1.7', self._version) >= 0:
+ raise Exception('Viz output is not supported in API >= 1.7!')
return self._result(self._get(self._url("images/viz")))
params = {
'filter': name,
@@ -404,33 +513,44 @@ class Client(requests.Session):
return [x['Id'] for x in res]
return res
- def import_image(self, src, data=None, repository=None, tag=None):
+ def import_image(self, src=None, repository=None, tag=None, image=None):
u = self._url("/images/create")
params = {
'repo': repository,
'tag': tag
}
- try:
- # XXX: this is ways not optimal but the only way
- # for now to import tarballs through the API
- fic = open(src)
- data = fic.read()
- fic.close()
- src = "-"
- except IOError:
- # file does not exists or not a file (URL)
- data = None
- if isinstance(src, six.string_types):
- params['fromSrc'] = src
- return self._result(self._post(u, data=data, params=params))
-
- return self._result(self._post(u, data=src, params=params))
+
+ if src:
+ try:
+ # XXX: this is far from optimal but the only way
+ # for now to import tarballs through the API
+ fic = open(src)
+ data = fic.read()
+ fic.close()
+ src = "-"
+ except IOError:
+ # file does not exist or is not a file (URL)
+ data = None
+ if isinstance(src, six.string_types):
+ params['fromSrc'] = src
+ return self._result(self._post(u, data=data, params=params))
+ return self._result(self._post(u, data=src, params=params))
+
+ if image:
+ params['fromImage'] = image
+ return self._result(self._post(u, data=None, params=params))
+
+ raise Exception("Must specify a src or image")
def info(self):
return self._result(self._get(self._url("/info")),
True)
def insert(self, image, url, path):
+ if utils.compare_version('1.12', self._version) >= 0:
+ raise errors.DeprecatedMethod(
+ 'insert is not available for API version >=1.12'
+ )
api_url = self._url("/images/" + image + "/insert")
params = {
'url': url,
@@ -462,48 +582,69 @@ class Client(requests.Session):
self._raise_for_status(res)
- def login(self, username, password=None, email=None, registry=None):
- url = self._url("/auth")
- if registry is None:
- registry = auth.INDEX_URL
- if getattr(self, '_cfg', None) is None:
- self._cfg = auth.load_config()
- authcfg = auth.resolve_authconfig(self._cfg, registry)
- if 'username' in authcfg and authcfg['username'] == username:
+ def load_image(self, data):
+ res = self._post(self._url("/images/load"), data=data)
+ self._raise_for_status(res)
+
+ def login(self, username, password=None, email=None, registry=None,
+ reauth=False):
+ # If we don't have any auth data so far, try reloading the config file
+ # one more time in case anything showed up in there.
+ if not self._auth_configs:
+ self._auth_configs = auth.load_config()
+
+ registry = registry or auth.INDEX_URL
+
+ authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+ # If we found an existing auth config for this registry and username
+ # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == username \
+ and not reauth:
return authcfg
+
req_data = {
'username': username,
'password': password,
- 'email': email
+ 'email': email,
+ 'serveraddress': registry,
}
- res = self._result(self._post_json(url, data=req_data), True)
- if res['Status'] == 'Login Succeeded':
- self._cfg['Configs'][registry] = req_data
- return res
- def logs(self, container):
+ response = self._post_json(self._url('/auth'), data=req_data)
+ if response.status_code == 200:
+ self._auth_configs[registry] = req_data
+ return self._result(response, json=True)
+
+ def logs(self, container, stdout=True, stderr=True, stream=False,
+ timestamps=False):
if isinstance(container, dict):
container = container.get('Id')
- params = {
- 'logs': 1,
- 'stdout': 1,
- 'stderr': 1
- }
- u = self._url("/containers/{0}/attach".format(container))
- if utils.compare_version('1.6', self._version) < 0:
- return self._result(self._post(u, params=params))
- res = ''
- response = self._result(self._post(u, params=params))
- walker = 0
- while walker < len(response):
- header = response[walker:walker+8]
- walker += 8
- # we don't care about the type of stream since we want both
- # stdout and stderr
- length = struct.unpack(">L", header[4:].encode())[0]
- res += response[walker:walker+length]
- walker += length
- return res
+ if utils.compare_version('1.11', self._version) >= 0:
+ params = {'stderr': stderr and 1 or 0,
+ 'stdout': stdout and 1 or 0,
+ 'timestamps': timestamps and 1 or 0,
+ 'follow': stream and 1 or 0}
+ url = self._url("/containers/{0}/logs".format(container))
+ res = self._get(url, params=params, stream=stream)
+ if stream:
+ return self._multiplexed_socket_stream_helper(res)
+ elif six.PY3:
+ return bytes().join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+ else:
+ return str().join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+ return self.attach(
+ container,
+ stdout=stdout,
+ stderr=stderr,
+ stream=stream,
+ logs=True
+ )
+
+ def ping(self):
+ return self._result(self._get(self._url('/_ping')))
def port(self, container, private_port):
if isinstance(container, dict):
@@ -512,15 +653,17 @@ class Client(requests.Session):
self._raise_for_status(res)
json_ = res.json()
s_port = str(private_port)
- f_port = None
- if s_port in json_['NetworkSettings']['PortMapping']['Udp']:
- f_port = json_['NetworkSettings']['PortMapping']['Udp'][s_port]
- elif s_port in json_['NetworkSettings']['PortMapping']['Tcp']:
- f_port = json_['NetworkSettings']['PortMapping']['Tcp'][s_port]
+ h_ports = None
+
+ h_ports = json_['NetworkSettings']['Ports'].get(s_port + '/udp')
+ if h_ports is None:
+ h_ports = json_['NetworkSettings']['Ports'].get(s_port + '/tcp')
- return f_port
+ return h_ports
def pull(self, repository, tag=None, stream=False):
+ if not tag:
+ repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
if repo_name.count(":") == 1:
repository, tag = repository.rsplit(":", 1)
@@ -532,16 +675,20 @@ class Client(requests.Session):
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
- if getattr(self, '_cfg', None) is None:
- self._cfg = auth.load_config()
- authcfg = auth.resolve_authconfig(self._cfg, registry)
- # do not fail if no atuhentication exists
- # for this specific registry as we can have a readonly pull
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs:
+ self._auth_configs = auth.load_config()
+ authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+
+ # Do not fail here if no authentication exists for this specific
+ # registry as we can have a readonly pull. Just put the header if
+ # we can.
if authcfg:
headers['X-Registry-Auth'] = auth.encode_header(authcfg)
- u = self._url("/images/create")
- response = self._post(u, params=params, headers=headers, stream=stream,
- timeout=None)
+
+ response = self._post(self._url('/images/create'), params=params,
+ headers=headers, stream=stream, timeout=None)
if stream:
return self._stream_helper(response)
@@ -552,37 +699,38 @@ class Client(requests.Session):
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push".format(repository))
headers = {}
- if getattr(self, '_cfg', None) is None:
- self._cfg = auth.load_config()
- authcfg = auth.resolve_authconfig(self._cfg, registry)
+
if utils.compare_version('1.5', self._version) >= 0:
- # do not fail if no atuhentication exists
- # for this specific registry as we can have an anon push
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs:
+ self._auth_configs = auth.load_config()
+ authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+
+ # Do not fail here if no authentication exists for this specific
+ # registry as we can have an anonymous push. Just put the header if
+ # we can.
if authcfg:
headers['X-Registry-Auth'] = auth.encode_header(authcfg)
- if stream:
- return self._stream_helper(
- self._post_json(u, None, headers=headers, stream=True))
- else:
- return self._result(
- self._post_json(u, None, headers=headers, stream=False))
- if stream:
- return self._stream_helper(
- self._post_json(u, authcfg, stream=True))
+ response = self._post_json(u, None, headers=headers, stream=stream)
else:
- return self._result(self._post_json(u, authcfg, stream=False))
+ response = self._post_json(u, None, stream=stream)
- def remove_container(self, container, v=False, link=False):
+ return stream and self._stream_helper(response) \
+ or self._result(response)
+
+ def remove_container(self, container, v=False, link=False, force=False):
if isinstance(container, dict):
container = container.get('Id')
- params = {'v': v, 'link': link}
+ params = {'v': v, 'link': link, 'force': force}
res = self._delete(self._url("/containers/" + container),
params=params)
self._raise_for_status(res)
- def remove_image(self, image):
- res = self._delete(self._url("/images/" + image))
+ def remove_image(self, image, force=False, noprune=False):
+ params = {'force': force, 'noprune': noprune}
+ res = self._delete(self._url("/images/" + image), params=params)
self._raise_for_status(res)
def restart(self, container, timeout=10):
@@ -599,7 +747,8 @@ class Client(requests.Session):
True)
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=False, links=None):
+ publish_all_ports=False, links=None, privileged=False,
+ dns=None, dns_search=None, volumes_from=None, network_mode=None):
if isinstance(container, dict):
container = container.get('Id')
@@ -613,10 +762,7 @@ class Client(requests.Session):
'LxcConf': lxc_conf
}
if binds:
- bind_pairs = [
- '{0}:{1}'.format(host, dest) for host, dest in binds.items()
- ]
- start_config['Binds'] = bind_pairs
+ start_config['Binds'] = utils.convert_volume_binds(binds)
if port_bindings:
start_config['PortBindings'] = utils.convert_port_bindings(
@@ -626,16 +772,55 @@ class Client(requests.Session):
start_config['PublishAllPorts'] = publish_all_ports
if links:
+ if isinstance(links, dict):
+ links = six.iteritems(links)
+
formatted_links = [
- '{0}:{1}'.format(k, v) for k, v in sorted(six.iteritems(links))
+ '{0}:{1}'.format(k, v) for k, v in sorted(links)
]
start_config['Links'] = formatted_links
+ start_config['Privileged'] = privileged
+
+ if utils.compare_version('1.10', self._version) >= 0:
+ if dns is not None:
+ start_config['Dns'] = dns
+ if volumes_from is not None:
+ if isinstance(volumes_from, six.string_types):
+ volumes_from = volumes_from.split(',')
+ start_config['VolumesFrom'] = volumes_from
+ else:
+ warning_message = ('{0!r} parameter is discarded. It is only'
+ ' available for API version greater than'
+ ' or equal to 1.10')
+
+ if dns is not None:
+ warnings.warn(warning_message.format('dns'),
+ DeprecationWarning)
+ if volumes_from is not None:
+ warnings.warn(warning_message.format('volumes_from'),
+ DeprecationWarning)
+
+ if dns_search:
+ start_config['DnsSearch'] = dns_search
+
+ if network_mode:
+ start_config['NetworkMode'] = network_mode
+
url = self._url("/containers/{0}/start".format(container))
res = self._post_json(url, data=start_config)
self._raise_for_status(res)
+ def resize(self, container, height, width):
+ if isinstance(container, dict):
+ container = container.get('Id')
+
+ params = {'h': height, 'w': width}
+ url = self._url("/containers/{0}/resize".format(container))
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+
def stop(self, container, timeout=10):
if isinstance(container, dict):
container = container.get('Id')
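
Usage sketch (not part of the commit) of the reworked Client; the 'busybox' image, command and container name are placeholders, and a daemon is assumed to be listening on the default socket:

    from docker import Client

    c = Client()   # defaults to http+unix://var/run/docker.sock, API version 1.12
    container = c.create_container('busybox', command='echo hello',
                                   name='hello', working_dir='/tmp')
    c.start(container, privileged=False)

    # On API >= 1.11, logs() hits /containers/<id>/logs and demultiplexes the
    # stream; with stream=True it returns a generator of raw output chunks.
    for chunk in c.logs(container, stdout=True, stderr=True, stream=True):
        print(chunk)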
diff --git a/docker/errors.py b/docker/errors.py
new file mode 100644
index 0000000..85a6d45
--- /dev/null
+++ b/docker/errors.py
@@ -0,0 +1,65 @@
+# Copyright 2014 dotCloud inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+
+
+class APIError(requests.exceptions.HTTPError):
+ def __init__(self, message, response, explanation=None):
+ # requests 1.2 supports response as a keyword argument, but
+ # requests 1.1 doesn't
+ super(APIError, self).__init__(message)
+ self.response = response
+
+ self.explanation = explanation
+
+ if self.explanation is None and response.content:
+ self.explanation = response.content.strip()
+
+ def __str__(self):
+ message = super(APIError, self).__str__()
+
+ if self.is_client_error():
+ message = '%s Client Error: %s' % (
+ self.response.status_code, self.response.reason)
+
+ elif self.is_server_error():
+ message = '%s Server Error: %s' % (
+ self.response.status_code, self.response.reason)
+
+ if self.explanation:
+ message = '%s ("%s")' % (message, self.explanation)
+
+ return message
+
+ def is_client_error(self):
+ return 400 <= self.response.status_code < 500
+
+ def is_server_error(self):
+ return 500 <= self.response.status_code < 600
+
+
+class DockerException(Exception):
+ pass
+
+
+class InvalidRepository(DockerException):
+ pass
+
+
+class InvalidConfigFile(DockerException):
+ pass
+
+
+class DeprecatedMethod(DockerException):
+ pass
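
Usage sketch (not part of the commit): APIError moves from docker.client into this new docker.errors module and is no longer importable from the top-level package, so callers catch it like this ('no-such-container' is a placeholder):

    from docker import Client
    from docker.errors import APIError

    c = Client()
    try:
        c.remove_container('no-such-container')
    except APIError as e:
        if e.is_client_error():     # e.g. a 404 from the daemon
            print(e)
        else:
            raise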
diff --git a/docker/unixconn/unixconn.py b/docker/unixconn/unixconn.py
index c9565a2..3d3f7bc 100644
--- a/docker/unixconn/unixconn.py
+++ b/docker/unixconn/unixconn.py
@@ -36,11 +36,11 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
- sock.connect(self.base_url.replace("unix:/", ""))
+ sock.connect(self.base_url.replace("http+unix:/", ""))
self.sock = sock
def _extract_path(self, url):
- #remove the base_url entirely..
+ # remove the base_url entirely..
return url.replace(self.base_url, "")
def request(self, method, url, **kwargs):
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 386a01a..86ddd35 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,3 +1,4 @@
from .utils import (
- compare_version, convert_port_bindings, mkbuildcontext, ping, tar
+ compare_version, convert_port_bindings, convert_volume_binds,
+ mkbuildcontext, ping, tar, parse_repository_tag
) # flake8: noqa
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 8fd9e94..036a504 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -15,6 +15,7 @@
import io
import tarfile
import tempfile
+from distutils.version import StrictVersion
import requests
import six
@@ -51,15 +52,34 @@ def tar(path):
def compare_version(v1, v2):
- return float(v2) - float(v1)
+ """Compare docker versions
+
+ >>> v1 = '1.9'
+ >>> v2 = '1.10'
+ >>> compare_version(v1, v2)
+ 1
+ >>> compare_version(v2, v1)
+ -1
+ >>> compare_version(v2, v2)
+ 0
+ """
+ s1 = StrictVersion(v1)
+ s2 = StrictVersion(v2)
+ if s1 == s2:
+ return 0
+ elif s1 > s2:
+ return -1
+ else:
+ return 1
def ping(url):
try:
res = requests.get(url)
- return res.status >= 400
except Exception:
return False
+ else:
+ return res.status_code < 400
def _convert_port_binding(binding):
@@ -72,6 +92,13 @@ def _convert_port_binding(binding):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
+ elif isinstance(binding, dict):
+ if 'HostPort' in binding:
+ result['HostPort'] = binding['HostPort']
+ if 'HostIp' in binding:
+ result['HostIp'] = binding['HostIp']
+ else:
+ raise ValueError(binding)
else:
result['HostPort'] = binding
@@ -94,3 +121,27 @@ def convert_port_bindings(port_bindings):
else:
result[key] = [_convert_port_binding(v)]
return result
+
+
+def convert_volume_binds(binds):
+ result = []
+ for k, v in binds.items():
+ if isinstance(v, dict):
+ result.append('%s:%s:%s' % (
+ k, v['bind'], 'ro' if v.get('ro', False) else 'rw'
+ ))
+ else:
+ result.append('%s:%s:rw' % (k, v))
+ return result
+
+
+def parse_repository_tag(repo):
+ column_index = repo.rfind(':')
+ if column_index < 0:
+ return repo, None
+ tag = repo[column_index+1:]
+ slash_index = tag.find('/')
+ if slash_index < 0:
+ return repo[:column_index], tag
+
+ return repo, None
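
Usage sketch (not part of the commit) of the new and reworked helpers; the paths and repository names are placeholders:

    from docker.utils import (compare_version, convert_volume_binds,
                              parse_repository_tag)

    # compare_version() is positive when its second argument is the newer one.
    assert compare_version('1.9', '1.10') > 0
    assert compare_version('1.12', '1.12') == 0

    # A trailing ":tag" is split off, but a ":port/..." registry prefix is not.
    parse_repository_tag('ubuntu:14.04')             # ('ubuntu', '14.04')
    parse_repository_tag('registry.local:5000/app')  # ('registry.local:5000/app', None)

    # Binds for Client.start() are rendered as "host:container:mode" strings.
    convert_volume_binds({'/srv/data': {'bind': '/data', 'ro': True}})
    # -> ['/srv/data:/data:ro']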
diff --git a/docker/version.py b/docker/version.py
new file mode 100644
index 0000000..5189669
--- /dev/null
+++ b/docker/version.py
@@ -0,0 +1 @@
+version = "0.3.2"