Diffstat (limited to 'docker')
43 files changed, 1570 insertions, 536 deletions
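The headline client-side additions in this changeset are the ``ssh://`` transport and automatic proxy injection from the Docker client configuration file. A minimal usage sketch (the host name and image tag are illustrative; ``ssh://`` support assumes the paramiko package is installed):

>>> import docker
>>> client = docker.APIClient(base_url='ssh://user@build-host', version='auto')
>>> # Proxy settings found in ~/.docker/config.json are merged into the build
>>> # args because use_config_proxy defaults to True; pass False to opt out.
>>> for chunk in client.build(path='.', tag='example/app', decode=True):
...     print(chunk)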
diff --git a/docker/api/build.py b/docker/api/build.py index 419255f..365129a 100644 --- a/docker/api/build.py +++ b/docker/api/build.py @@ -19,7 +19,8 @@ class BuildApiMixin(object): forcerm=False, dockerfile=None, container_limits=None, decode=False, buildargs=None, gzip=False, shmsize=None, labels=None, cache_from=None, target=None, network_mode=None, - squash=None, extra_hosts=None, platform=None, isolation=None): + squash=None, extra_hosts=None, platform=None, isolation=None, + use_config_proxy=True): """ Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` needs to be set. ``path`` can be a local path (to a directory @@ -103,6 +104,10 @@ class BuildApiMixin(object): platform (str): Platform in the format ``os[/arch[/variant]]`` isolation (str): Isolation technology used during build. Default: `None`. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being built. Returns: A generator for the build output. @@ -116,6 +121,7 @@ class BuildApiMixin(object): remote = context = None headers = {} container_limits = container_limits or {} + buildargs = buildargs or {} if path is None and fileobj is None: raise TypeError("Either path or fileobj needs to be provided.") if gzip and encoding is not None: @@ -168,6 +174,10 @@ class BuildApiMixin(object): } params.update(container_limits) + if use_config_proxy: + proxy_args = self._proxy_configs.get_environment() + for k, v in proxy_args.items(): + buildargs.setdefault(k, v) if buildargs: params.update({'buildargs': json.dumps(buildargs)}) @@ -286,30 +296,21 @@ class BuildApiMixin(object): # If we don't have any auth data so far, try reloading the config # file one more time in case anything showed up in there. - if not self._auth_configs: + if not self._auth_configs or self._auth_configs.is_empty: log.debug("No auth config in memory - loading from filesystem") - self._auth_configs = auth.load_config() + self._auth_configs = auth.load_config( + credstore_env=self.credstore_env + ) # Send the full auth configuration (if any exists), since the build # could use any (or all) of the registries. 
if self._auth_configs: - auth_data = {} - if self._auth_configs.get('credsStore'): - # Using a credentials store, we need to retrieve the - # credentials for each registry listed in the config.json file - # Matches CLI behavior: https://github.com/docker/docker/blob/ - # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/ - # credentials/native_store.go#L68-L83 - for registry in self._auth_configs.get('auths', {}).keys(): - auth_data[registry] = auth.resolve_authconfig( - self._auth_configs, registry, - credstore_env=self.credstore_env, - ) - else: - auth_data = self._auth_configs.get('auths', {}).copy() - # See https://github.com/docker/docker-py/issues/1683 - if auth.INDEX_NAME in auth_data: - auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME] + auth_data = self._auth_configs.get_all_credentials() + + # See https://github.com/docker/docker-py/issues/1683 + if (auth.INDEX_URL not in auth_data and + auth.INDEX_NAME in auth_data): + auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {}) log.debug( 'Sending auth config ({0})'.format( @@ -317,9 +318,10 @@ class BuildApiMixin(object): ) ) - headers['X-Registry-Config'] = auth.encode_header( - auth_data - ) + if auth_data: + headers['X-Registry-Config'] = auth.encode_header( + auth_data + ) else: log.debug('No auth config found') @@ -331,7 +333,14 @@ def process_dockerfile(dockerfile, path): abs_dockerfile = dockerfile if not os.path.isabs(dockerfile): abs_dockerfile = os.path.join(path, dockerfile) - + if constants.IS_WINDOWS_PLATFORM and path.startswith( + constants.WINDOWS_LONGPATH_PREFIX): + abs_dockerfile = '{}{}'.format( + constants.WINDOWS_LONGPATH_PREFIX, + os.path.normpath( + abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):] + ) + ) if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or os.path.relpath(abs_dockerfile, path).startswith('..')): # Dockerfile not in context - read data to insert into tar later diff --git a/docker/api/client.py b/docker/api/client.py index 91da1c8..35dc84e 100644 --- a/docker/api/client.py +++ b/docker/api/client.py @@ -22,20 +22,26 @@ from .volume import VolumeApiMixin from .. import auth from ..constants import ( DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM, - DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS, - MINIMUM_DOCKER_API_VERSION + DEFAULT_DOCKER_API_VERSION, MINIMUM_DOCKER_API_VERSION, + STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS_SSH, DEFAULT_NUM_POOLS ) from ..errors import ( DockerException, InvalidVersion, TLSParameterError, create_api_error_from_http_exception ) from ..tls import TLSConfig -from ..transport import SSLAdapter, UnixAdapter +from ..transport import SSLHTTPAdapter, UnixHTTPAdapter from ..utils import utils, check_resource, update_headers, config -from ..utils.socket import frames_iter, socket_raw_iter +from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor from ..utils.json_stream import json_stream +from ..utils.proxy import ProxyConfig try: - from ..transport import NpipeAdapter + from ..transport import NpipeHTTPAdapter +except ImportError: + pass + +try: + from ..transport import SSHHTTPAdapter except ImportError: pass @@ -76,7 +82,7 @@ class APIClient( base_url (str): URL to the Docker server. For example, ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``. version (str): The version of the API to use. Set to ``auto`` to - automatically detect the server's version. Default: ``1.30`` + automatically detect the server's version. 
Default: ``1.35`` timeout (int): Default timeout for API calls, in seconds. tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass ``True`` to enable it with default options, or pass a @@ -95,7 +101,7 @@ class APIClient( def __init__(self, base_url=None, version=None, timeout=DEFAULT_TIMEOUT_SECONDS, tls=False, - user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS, + user_agent=DEFAULT_USER_AGENT, num_pools=None, credstore_env=None): super(APIClient, self).__init__() @@ -109,16 +115,29 @@ class APIClient( self.headers['User-Agent'] = user_agent self._general_configs = config.load_general_config() + + proxy_config = self._general_configs.get('proxies', {}) + try: + proxies = proxy_config[base_url] + except KeyError: + proxies = proxy_config.get('default', {}) + + self._proxy_configs = ProxyConfig.from_dict(proxies) + self._auth_configs = auth.load_config( - config_dict=self._general_configs + config_dict=self._general_configs, credstore_env=credstore_env, ) self.credstore_env = credstore_env base_url = utils.parse_host( base_url, IS_WINDOWS_PLATFORM, tls=bool(tls) ) + # SSH has a different default for num_pools to all other adapters + num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \ + base_url.startswith('ssh://') else DEFAULT_NUM_POOLS + if base_url.startswith('http+unix://'): - self._custom_adapter = UnixAdapter( + self._custom_adapter = UnixHTTPAdapter( base_url, timeout, pool_connections=num_pools ) self.mount('http+docker://', self._custom_adapter) @@ -132,7 +151,7 @@ class APIClient( 'The npipe:// protocol is only supported on Windows' ) try: - self._custom_adapter = NpipeAdapter( + self._custom_adapter = NpipeHTTPAdapter( base_url, timeout, pool_connections=num_pools ) except NameError: @@ -141,12 +160,25 @@ class APIClient( ) self.mount('http+docker://', self._custom_adapter) self.base_url = 'http+docker://localnpipe' + elif base_url.startswith('ssh://'): + try: + self._custom_adapter = SSHHTTPAdapter( + base_url, timeout, pool_connections=num_pools + ) + except NameError: + raise DockerException( + 'Install paramiko package to enable ssh:// support' + ) + self.mount('http+docker://ssh', self._custom_adapter) + self._unmount('http://', 'https://') + self.base_url = 'http+docker://ssh' else: # Use SSLAdapter for the ability to specify SSL version if isinstance(tls, TLSConfig): tls.configure_client(self) elif tls: - self._custom_adapter = SSLAdapter(pool_connections=num_pools) + self._custom_adapter = SSLHTTPAdapter( + pool_connections=num_pools) self.mount('https://', self._custom_adapter) self.base_url = base_url @@ -279,6 +311,8 @@ class APIClient( self._raise_for_status(response) if self.base_url == "http+docker://localnpipe": sock = response.raw._fp.fp.raw.sock + elif self.base_url.startswith('http+docker://ssh'): + sock = response.raw._fp.fp.channel elif six.PY3: sock = response.raw._fp.fp.raw if self.base_url.startswith("https://"): @@ -362,19 +396,23 @@ class APIClient( for out in response.iter_content(chunk_size, decode): yield out - def _read_from_socket(self, response, stream, tty=False): + def _read_from_socket(self, response, stream, tty=True, demux=False): socket = self._get_raw_response_socket(response) - gen = None - if tty is False: - gen = frames_iter(socket) + gen = frames_iter(socket, tty) + + if demux: + # The generator will output tuples (stdout, stderr) + gen = (demux_adaptor(*frame) for frame in gen) else: - gen = socket_raw_iter(socket) + # The generator will output strings + gen = (data for (_, data) in gen) if stream: return gen else: - 
return six.binary_type().join(gen) + # Wait for all the frames, concatenate them, and return the result + return consume_socket_output(gen, demux=demux) def _disable_socket_timeout(self, socket): """ Depending on the combination of python version and whether we're @@ -457,4 +495,6 @@ class APIClient( Returns: None """ - self._auth_configs = auth.load_config(dockercfg_path) + self._auth_configs = auth.load_config( + dockercfg_path, credstore_env=self.credstore_env + ) diff --git a/docker/api/config.py b/docker/api/config.py index 767bef2..93e5168 100644 --- a/docker/api/config.py +++ b/docker/api/config.py @@ -42,7 +42,7 @@ class ConfigApiMixin(object): Retrieve config metadata Args: - id (string): Full ID of the config to remove + id (string): Full ID of the config to inspect Returns (dict): A dictionary of metadata diff --git a/docker/api/container.py b/docker/api/container.py index d4f75f5..45bd352 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -1,19 +1,21 @@ -import six from datetime import datetime +import six + from .. import errors from .. import utils from ..constants import DEFAULT_DATA_CHUNK_SIZE -from ..types import ( - CancellableStream, ContainerConfig, EndpointConfig, HostConfig, - NetworkingConfig -) +from ..types import CancellableStream +from ..types import ContainerConfig +from ..types import EndpointConfig +from ..types import HostConfig +from ..types import NetworkingConfig class ContainerApiMixin(object): @utils.check_resource('container') def attach(self, container, stdout=True, stderr=True, - stream=False, logs=False): + stream=False, logs=False, demux=False): """ Attach to a container. @@ -28,11 +30,15 @@ class ContainerApiMixin(object): stream (bool): Return container output progressively as an iterator of strings, rather than a single string. logs (bool): Include the container's previous output. + demux (bool): Keep stdout and stderr separate. Returns: - By default, the container's output as a single string. + By default, the container's output as a single string (two if + ``demux=True``: one for stdout and one for stderr). - If ``stream=True``, an iterator of output strings. + If ``stream=True``, an iterator of output strings. If + ``demux=True``, two iterators are returned: one for stdout and one + for stderr. Raises: :py:class:`docker.errors.APIError` @@ -54,8 +60,7 @@ class ContainerApiMixin(object): response = self._post(u, headers=headers, params=params, stream=True) output = self._read_from_socket( - response, stream, self._check_is_tty(container) - ) + response, stream, self._check_is_tty(container), demux=demux) if stream: return CancellableStream(output, response) @@ -169,7 +174,8 @@ class ContainerApiMixin(object): - `exited` (int): Only containers with specified exit code - `status` (str): One of ``restarting``, ``running``, ``paused``, ``exited`` - - `label` (str): format either ``"key"`` or ``"key=value"`` + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. - `id` (str): The id of the container. - `name` (str): The name of the container. - `ancestor` (str): Filter by container ancestor. Format of @@ -218,7 +224,8 @@ class ContainerApiMixin(object): working_dir=None, domainname=None, host_config=None, mac_address=None, labels=None, stop_signal=None, networking_config=None, healthcheck=None, - stop_timeout=None, runtime=None): + stop_timeout=None, runtime=None, + use_config_proxy=True): """ Creates a container. 
Parameters are similar to those for the ``docker run`` command except it doesn't support the attach options (``-a``). @@ -387,6 +394,10 @@ class ContainerApiMixin(object): runtime (str): Runtime to use with this container. healthcheck (dict): Specify a test to perform to check that the container is healthy. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being created. Returns: A dictionary with an image 'Id' key and a 'Warnings' key. @@ -400,6 +411,14 @@ class ContainerApiMixin(object): if isinstance(volumes, six.string_types): volumes = [volumes, ] + if isinstance(environment, dict): + environment = utils.utils.format_environment(environment) + + if use_config_proxy: + environment = self._proxy_configs.inject_proxy_environment( + environment + ) or None + config = self.create_container_config( image, command, hostname, user, detach, stdin_open, tty, ports, environment, volumes, @@ -465,30 +484,26 @@ class ContainerApiMixin(object): dns_opt (:py:class:`list`): Additional options to be added to the container's ``resolv.conf`` file dns_search (:py:class:`list`): DNS search domains. - extra_hosts (dict): Addtional hostnames to resolve inside the + extra_hosts (dict): Additional hostnames to resolve inside the container, as a mapping of hostname to IP address. group_add (:py:class:`list`): List of additional group names and/or IDs that the container process will run as. init (bool): Run an init inside the container that forwards signals and reaps processes - init_path (str): Path to the docker-init binary ipc_mode (str): Set the IPC mode for the container. - isolation (str): Isolation technology to use. Default: `None`. - links (dict or list of tuples): Either a dictionary mapping name - to alias or as a list of ``(name, alias)`` tuples. - log_config (dict): Logging configuration, as a dictionary with - keys: - - - ``type`` The logging driver name. - - ``config`` A dictionary of configuration for the logging - driver. - + isolation (str): Isolation technology to use. Default: ``None``. + links (dict): Mapping of links using the + ``{'container': 'alias'}`` format. The alias is optional. + Containers declared in this dict will be linked to the new + container using the provided alias. Default: ``None``. + log_config (LogConfig): Logging configuration lxc_conf (dict): LXC config. mem_limit (float or str): Memory limit. Accepts float values (which represent the memory limit of the created container in bytes) or a string with a units identification char (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is specified without a units character, bytes are assumed as an + mem_reservation (int or str): Memory soft limit. mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number between 0 and 100. memswap_limit (str or int): Maximum amount of memory + swap a @@ -500,7 +515,7 @@ class ContainerApiMixin(object): network_mode (str): One of: - ``bridge`` Create a new network stack for the container on - on the bridge network. + the bridge network. - ``none`` No networking for this container. - ``container:<name|id>`` Reuse another container's network stack. @@ -543,10 +558,12 @@ class ContainerApiMixin(object): } ulimits (:py:class:`list`): Ulimits to set inside the container, - as a list of dicts. + as a list of :py:class:`docker.types.Ulimit` instances. 
userns_mode (str): Sets the user namespace mode for the container when user namespace remapping option is enabled. Supported values are: ``host`` + uts_mode (str): Sets the UTS namespace mode for the container. + Supported values are: ``host`` volumes_from (:py:class:`list`): List of container names or IDs to get volumes from. runtime (str): Runtime to use with this container. @@ -609,9 +626,10 @@ class ContainerApiMixin(object): aliases (:py:class:`list`): A list of aliases for this endpoint. Names in that list can be used within the network to reach the container. Defaults to ``None``. - links (:py:class:`list`): A list of links for this endpoint. - Containers declared in this list will be linked to this - container. Defaults to ``None``. + links (dict): Mapping of links for this endpoint using the + ``{'container': 'alias'}`` format. The alias is optional. + Containers declared in this dict will be linked to this + container using the provided alias. Defaults to ``None``. ipv4_address (str): The IP address of this container on the network, using the IPv4 protocol. Defaults to ``None``. ipv6_address (str): The IP address of this container on the @@ -626,7 +644,7 @@ class ContainerApiMixin(object): >>> endpoint_config = client.create_endpoint_config( aliases=['web', 'app'], - links=['app_db'], + links={'app_db': 'db', 'another': None}, ipv4_address='132.65.0.123' ) @@ -695,6 +713,18 @@ class ContainerApiMixin(object): Raises: :py:class:`docker.errors.APIError` If the server returns an error. + + Example: + + >>> c = docker.APIClient() + >>> f = open('./sh_bin.tar', 'wb') + >>> bits, stat = c.get_archive(container, '/bin/sh') + >>> print(stat) + {'name': 'sh', 'size': 1075464, 'mode': 493, + 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} + >>> for chunk in bits: + ... f.write(chunk) + >>> f.close() """ params = { 'path': path @@ -763,16 +793,16 @@ class ContainerApiMixin(object): Args: container (str): The container to get logs from - stdout (bool): Get ``STDOUT`` - stderr (bool): Get ``STDERR`` - stream (bool): Stream the response - timestamps (bool): Show timestamps + stdout (bool): Get ``STDOUT``. Default ``True`` + stderr (bool): Get ``STDERR``. Default ``True`` + stream (bool): Stream the response. Default ``False`` + timestamps (bool): Show timestamps. Default ``False`` tail (str or int): Output specified number of lines at the end of logs. Either an integer of number of lines or the string ``all``. Default ``all`` since (datetime or int): Show logs since a given datetime or integer epoch (in seconds) - follow (bool): Follow log output + follow (bool): Follow log output. Default ``False`` until (datetime or int): Show logs that occurred before the given datetime or integer epoch (in seconds) @@ -888,9 +918,10 @@ class ContainerApiMixin(object): if '/' in private_port: return port_settings.get(private_port) - h_ports = port_settings.get(private_port + '/tcp') - if h_ports is None: - h_ports = port_settings.get(private_port + '/udp') + for protocol in ['tcp', 'udp', 'sctp']: + h_ports = port_settings.get(private_port + '/' + protocol) + if h_ports: + break return h_ports @@ -1072,7 +1103,8 @@ class ContainerApiMixin(object): Args: container (str): The container to stream statistics from decode (bool): If set to true, stream will be decoded into dicts - on the fly. False by default. + on the fly. Only applicable if ``stream`` is True. + False by default. stream (bool): If set to false, only the current stats will be returned instead of a stream. True by default. 
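As the amended ``stats`` docstring above notes, ``decode`` now only applies to streamed output, and the hunk that follows enforces this. A short sketch of the two call styles (the container name is illustrative):

>>> import docker
>>> api = docker.APIClient(version='auto')
>>> api.stats('my-container', stream=False)   # single snapshot, returned as a dict
>>> for stat in api.stats('my-container', decode=True):
...     print(stat['read'])                   # streamed stats, decoded into dicts
>>> # Passing decode=True together with stream=False now raises
>>> # docker.errors.InvalidArgument instead of being silently ignored.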
@@ -1086,6 +1118,10 @@ class ContainerApiMixin(object): return self._stream_helper(self._get(url, stream=True), decode=decode) else: + if decode: + raise errors.InvalidArgument( + "decode is only available in conjuction with stream=True" + ) return self._result(self._get(url, params={'stream': False}), json=True) diff --git a/docker/api/daemon.py b/docker/api/daemon.py index 76a94cf..f715a13 100644 --- a/docker/api/daemon.py +++ b/docker/api/daemon.py @@ -42,8 +42,8 @@ class DaemonApiMixin(object): Example: - >>> for event in client.events() - ... print event + >>> for event in client.events(decode=True) + ... print(event) {u'from': u'image/with:tag', u'id': u'container-id', u'status': u'start', @@ -54,7 +54,7 @@ class DaemonApiMixin(object): >>> events = client.events() >>> for event in events: - ... print event + ... print(event) >>> # and cancel from another thread >>> events.close() """ @@ -124,13 +124,15 @@ class DaemonApiMixin(object): # If dockercfg_path is passed check to see if the config file exists, # if so load that config. if dockercfg_path and os.path.exists(dockercfg_path): - self._auth_configs = auth.load_config(dockercfg_path) - elif not self._auth_configs: - self._auth_configs = auth.load_config() - - authcfg = auth.resolve_authconfig( - self._auth_configs, registry, credstore_env=self.credstore_env, - ) + self._auth_configs = auth.load_config( + dockercfg_path, credstore_env=self.credstore_env + ) + elif not self._auth_configs or self._auth_configs.is_empty: + self._auth_configs = auth.load_config( + credstore_env=self.credstore_env + ) + + authcfg = self._auth_configs.resolve_authconfig(registry) # If we found an existing auth config for this registry and username # combination, we can return it immediately unless reauth is requested. if authcfg and authcfg.get('username', None) == username \ @@ -146,9 +148,7 @@ class DaemonApiMixin(object): response = self._post_json(self._url('/auth'), data=req_data) if response.status_code == 200: - if 'auths' not in self._auth_configs: - self._auth_configs['auths'] = {} - self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data + self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data) return self._result(response, json=True) def ping(self): diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py index 986d87f..4c49ac3 100644 --- a/docker/api/exec_api.py +++ b/docker/api/exec_api.py @@ -118,7 +118,7 @@ class ExecApiMixin(object): @utils.check_resource('exec_id') def exec_start(self, exec_id, detach=False, tty=False, stream=False, - socket=False): + socket=False, demux=False): """ Start a previously set up exec instance. @@ -130,11 +130,15 @@ class ExecApiMixin(object): stream (bool): Stream response data. Default: False socket (bool): Return the connection socket to allow custom read/write operations. + demux (bool): Return stdout and stderr separately Returns: - (generator or str): If ``stream=True``, a generator yielding - response chunks. If ``socket=True``, a socket object for the - connection. A string containing response data otherwise. + + (generator or str or tuple): If ``stream=True``, a generator + yielding response chunks. If ``socket=True``, a socket object for + the connection. A string containing response data otherwise. If + ``demux=True``, a tuple with two elements of type byte: stdout and + stderr. 
Raises: :py:class:`docker.errors.APIError` @@ -162,4 +166,4 @@ class ExecApiMixin(object): return self._result(res) if socket: return self._get_raw_response_socket(res) - return self._read_from_socket(res, stream, tty) + return self._read_from_socket(res, stream, tty=tty, demux=demux) diff --git a/docker/api/image.py b/docker/api/image.py index 5f05d88..11c8cf7 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -32,7 +32,7 @@ class ImageApiMixin(object): Example: >>> image = cli.get_image("busybox:latest") - >>> f = open('/tmp/busybox-latest.tar', 'w') + >>> f = open('/tmp/busybox-latest.tar', 'wb') >>> for chunk in image: >>> f.write(chunk) >>> f.close() @@ -70,7 +70,8 @@ class ImageApiMixin(object): filters (dict): Filters to be processed on the image list. Available filters: - ``dangling`` (bool) - - ``label`` (str): format either ``key`` or ``key=value`` + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. Returns: (dict or list): A list if ``quiet=True``, otherwise a dict. @@ -247,12 +248,15 @@ class ImageApiMixin(object): @utils.minimum_version('1.30') @utils.check_resource('image') - def inspect_distribution(self, image): + def inspect_distribution(self, image, auth_config=None): """ Get image digest and platform information by contacting the registry. Args: image (str): The image name to inspect + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. Returns: (dict): A dict containing distribution data @@ -261,9 +265,21 @@ class ImageApiMixin(object): :py:class:`docker.errors.APIError` If the server returns an error. """ + registry, _ = auth.resolve_repository_name(image) + + headers = {} + if auth_config is None: + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header + else: + log.debug('Sending supplied auth config') + headers['X-Registry-Auth'] = auth.encode_header(auth_config) + + url = self._url("/distribution/{0}/json", image) return self._result( - self._get(self._url("/distribution/{0}/json", image)), True + self._get(url, headers=headers), True ) def load_image(self, data, quiet=None): @@ -334,11 +350,11 @@ class ImageApiMixin(object): Args: repository (str): The repository to pull tag (str): The tag to pull - stream (bool): Stream the output as a generator - auth_config (dict): Override the credentials that - :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for - this request. ``auth_config`` should contain the ``username`` - and ``password`` keys to be valid. + stream (bool): Stream the output as a generator. Make sure to + consume the generator, otherwise pull might get cancelled. + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. decode (bool): Decode the JSON data from the server into dicts. Only applies with ``stream=True`` platform (str): Platform in the format ``os[/arch[/variant]]`` @@ -352,8 +368,8 @@ class ImageApiMixin(object): Example: - >>> for line in cli.pull('busybox', stream=True): - ... print(json.dumps(json.loads(line), indent=4)) + >>> for line in cli.pull('busybox', stream=True, decode=True): + ... 
print(json.dumps(line, indent=4)) { "status": "Pulling image (latest) from busybox", "progressDetail": {}, @@ -413,10 +429,9 @@ class ImageApiMixin(object): repository (str): The repository to push to tag (str): An optional tag to push stream (bool): Stream the output as a blocking generator - auth_config (dict): Override the credentials that - :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for - this request. ``auth_config`` should contain the ``username`` - and ``password`` keys to be valid. + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. decode (bool): Decode the JSON data from the server into dicts. Only applies with ``stream=True`` @@ -428,12 +443,12 @@ class ImageApiMixin(object): If the server returns an error. Example: - >>> for line in cli.push('yourname/app', stream=True): - ... print line - {"status":"Pushing repository yourname/app (1 tags)"} - {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"} - {"status":"Image already pushed, skipping","progressDetail":{}, - "id":"511136ea3c5a"} + >>> for line in cli.push('yourname/app', stream=True, decode=True): + ... print(line) + {'status': 'Pushing repository yourname/app (1 tags)'} + {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'} + {'status': 'Image already pushed, skipping', 'progressDetail':{}, + 'id': '511136ea3c5a'} ... """ diff --git a/docker/api/network.py b/docker/api/network.py index 57ed8d3..750b91b 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -7,7 +7,7 @@ from .. import utils class NetworkApiMixin(object): def networks(self, names=None, ids=None, filters=None): """ - List networks. Similar to the ``docker networks ls`` command. + List networks. Similar to the ``docker network ls`` command. Args: names (:py:class:`list`): List of names to filter by @@ -15,7 +15,8 @@ class NetworkApiMixin(object): filters (dict): Filters to be processed on the network list. Available filters: - ``driver=[<driver-name>]`` Matches a network's driver. - - ``label=[<key>]`` or ``label=[<key>=<value>]``. + - ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of + such. - ``type=["custom"|"builtin"]`` Filters networks by type. Returns: diff --git a/docker/api/secret.py b/docker/api/secret.py index fa4c2ab..e57952b 100644 --- a/docker/api/secret.py +++ b/docker/api/secret.py @@ -53,7 +53,7 @@ class SecretApiMixin(object): Retrieve secret metadata Args: - id (string): Full ID of the secret to remove + id (string): Full ID of the secret to inspect Returns (dict): A dictionary of metadata diff --git a/docker/api/service.py b/docker/api/service.py index 03b0ca6..e9027bf 100644 --- a/docker/api/service.py +++ b/docker/api/service.py @@ -2,7 +2,8 @@ from .. 
import auth, errors, utils from ..types import ServiceMode -def _check_api_features(version, task_template, update_config, endpoint_spec): +def _check_api_features(version, task_template, update_config, endpoint_spec, + rollback_config): def raise_version_error(param, min_version): raise errors.InvalidVersion( @@ -18,10 +19,24 @@ def _check_api_features(version, task_template, update_config, endpoint_spec): if 'Monitor' in update_config: raise_version_error('UpdateConfig.monitor', '1.25') + if utils.version_lt(version, '1.28'): + if update_config.get('FailureAction') == 'rollback': + raise_version_error( + 'UpdateConfig.failure_action rollback', '1.28' + ) + if utils.version_lt(version, '1.29'): if 'Order' in update_config: raise_version_error('UpdateConfig.order', '1.29') + if rollback_config is not None: + if utils.version_lt(version, '1.28'): + raise_version_error('rollback_config', '1.28') + + if utils.version_lt(version, '1.29'): + if 'Order' in update_config: + raise_version_error('RollbackConfig.order', '1.29') + if endpoint_spec is not None: if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec: if any(p.get('PublishMode') for p in endpoint_spec['Ports']): @@ -73,6 +88,10 @@ def _check_api_features(version, task_template, update_config, endpoint_spec): if container_spec.get('Isolation') is not None: raise_version_error('ContainerSpec.isolation', '1.35') + if utils.version_lt(version, '1.38'): + if container_spec.get('Init') is not None: + raise_version_error('ContainerSpec.init', '1.38') + if task_template.get('Resources'): if utils.version_lt(version, '1.32'): if task_template['Resources'].get('GenericResources'): @@ -99,7 +118,7 @@ class ServiceApiMixin(object): def create_service( self, task_template, name=None, labels=None, mode=None, update_config=None, networks=None, endpoint_config=None, - endpoint_spec=None + endpoint_spec=None, rollback_config=None ): """ Create a service. @@ -114,8 +133,11 @@ class ServiceApiMixin(object): or global). Defaults to replicated. update_config (UpdateConfig): Specification for the update strategy of the service. Default: ``None`` - networks (:py:class:`list`): List of network names or IDs to attach - the service to. Default: ``None``. + rollback_config (RollbackConfig): Specification for the rollback + strategy of the service. Default: ``None`` + networks (:py:class:`list`): List of network names or IDs or + :py:class:`~docker.types.NetworkAttachmentConfig` to attach the + service to. Default: ``None``. endpoint_spec (EndpointSpec): Properties that can be configured to access and load balance a service. Default: ``None``. @@ -129,7 +151,8 @@ class ServiceApiMixin(object): """ _check_api_features( - self._version, task_template, update_config, endpoint_spec + self._version, task_template, update_config, endpoint_spec, + rollback_config ) url = self._url('/services/create') @@ -160,6 +183,9 @@ class ServiceApiMixin(object): if update_config is not None: data['UpdateConfig'] = update_config + if rollback_config is not None: + data['RollbackConfig'] = rollback_config + return self._result( self._post_json(url, data=data, headers=headers), True ) @@ -176,7 +202,8 @@ class ServiceApiMixin(object): into the service inspect output. Returns: - ``True`` if successful. + (dict): A dictionary of the server-side representation of the + service, including all relevant properties. 
Raises: :py:class:`docker.errors.APIError` @@ -336,7 +363,8 @@ class ServiceApiMixin(object): def update_service(self, service, version, task_template=None, name=None, labels=None, mode=None, update_config=None, networks=None, endpoint_config=None, - endpoint_spec=None, fetch_current_spec=False): + endpoint_spec=None, fetch_current_spec=False, + rollback_config=None): """ Update a service. @@ -354,15 +382,18 @@ class ServiceApiMixin(object): or global). Defaults to replicated. update_config (UpdateConfig): Specification for the update strategy of the service. Default: ``None``. - networks (:py:class:`list`): List of network names or IDs to attach - the service to. Default: ``None``. + rollback_config (RollbackConfig): Specification for the rollback + strategy of the service. Default: ``None`` + networks (:py:class:`list`): List of network names or IDs or + :py:class:`~docker.types.NetworkAttachmentConfig` to attach the + service to. Default: ``None``. endpoint_spec (EndpointSpec): Properties that can be configured to access and load balance a service. Default: ``None``. fetch_current_spec (boolean): Use the undefined settings from the current specification of the service. Default: ``False`` Returns: - ``True`` if successful. + A dictionary containing a ``Warnings`` key. Raises: :py:class:`docker.errors.APIError` @@ -370,7 +401,8 @@ class ServiceApiMixin(object): """ _check_api_features( - self._version, task_template, update_config, endpoint_spec + self._version, task_template, update_config, endpoint_spec, + rollback_config ) if fetch_current_spec: @@ -416,6 +448,11 @@ class ServiceApiMixin(object): else: data['UpdateConfig'] = current.get('UpdateConfig') + if rollback_config is not None: + data['RollbackConfig'] = rollback_config + else: + data['RollbackConfig'] = current.get('RollbackConfig') + if networks is not None: converted_networks = utils.convert_service_networks(networks) if utils.version_lt(self._version, '1.25'): @@ -440,5 +477,4 @@ class ServiceApiMixin(object): resp = self._post_json( url, data=data, params={'version': version}, headers=headers ) - self._raise_for_status(resp) - return True + return self._result(resp, json=True) diff --git a/docker/api/swarm.py b/docker/api/swarm.py index 04595da..897f08e 100644 --- a/docker/api/swarm.py +++ b/docker/api/swarm.py @@ -1,5 +1,6 @@ import logging from six.moves import http_client +from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE from .. import errors from .. import types from .. import utils @@ -82,7 +83,9 @@ class SwarmApiMixin(object): @utils.minimum_version('1.24') def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377', - force_new_cluster=False, swarm_spec=None): + force_new_cluster=False, swarm_spec=None, + default_addr_pool=None, subnet_size=None, + data_path_addr=None): """ Initialize a new Swarm using the current connected engine as the first node. @@ -107,9 +110,17 @@ class SwarmApiMixin(object): swarm_spec (dict): Configuration settings of the new Swarm. Use ``APIClient.create_swarm_spec`` to generate a valid configuration. Default: None + default_addr_pool (list of strings): Default Address Pool specifies + default subnet pools for global scope networks. Each pool + should be specified as a CIDR block, like '10.0.0.0/8'. + Default: None + subnet_size (int): SubnetSize specifies the subnet size of the + networks created from the default subnet pool. Default: None + data_path_addr (string): Address or interface to use for data path + traffic. 
For example, 192.168.1.1, or an interface, like eth0. Returns: - ``True`` if successful. + (str): The ID of the created node. Raises: :py:class:`docker.errors.APIError` @@ -119,15 +130,44 @@ class SwarmApiMixin(object): url = self._url('/swarm/init') if swarm_spec is not None and not isinstance(swarm_spec, dict): raise TypeError('swarm_spec must be a dictionary') + + if default_addr_pool is not None: + if utils.version_lt(self._version, '1.39'): + raise errors.InvalidVersion( + 'Address pool is only available for API version >= 1.39' + ) + # subnet_size becomes 0 if not set with default_addr_pool + if subnet_size is None: + subnet_size = DEFAULT_SWARM_SUBNET_SIZE + + if subnet_size is not None: + if utils.version_lt(self._version, '1.39'): + raise errors.InvalidVersion( + 'Subnet size is only available for API version >= 1.39' + ) + # subnet_size is ignored if set without default_addr_pool + if default_addr_pool is None: + default_addr_pool = DEFAULT_SWARM_ADDR_POOL + data = { 'AdvertiseAddr': advertise_addr, 'ListenAddr': listen_addr, + 'DefaultAddrPool': default_addr_pool, + 'SubnetSize': subnet_size, 'ForceNewCluster': force_new_cluster, 'Spec': swarm_spec, } + + if data_path_addr is not None: + if utils.version_lt(self._version, '1.30'): + raise errors.InvalidVersion( + 'Data address path is only available for ' + 'API version >= 1.30' + ) + data['DataPathAddr'] = data_path_addr + response = self._post_json(url, data=data) - self._raise_for_status(response) - return True + return self._result(response, json=True) @utils.minimum_version('1.24') def inspect_swarm(self): @@ -165,7 +205,7 @@ class SwarmApiMixin(object): @utils.minimum_version('1.24') def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377', - advertise_addr=None): + advertise_addr=None, data_path_addr=None): """ Make this Engine join a swarm that has already been created. @@ -176,7 +216,7 @@ class SwarmApiMixin(object): listen_addr (string): Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel - Endpoint (VTEP). Default: ``None`` + Endpoint (VTEP). Default: ``'0.0.0.0:2377`` advertise_addr (string): Externally reachable address advertised to other nodes. This can either be an address/port combination in the form ``192.168.1.1:4567``, or an interface followed by a @@ -184,6 +224,8 @@ class SwarmApiMixin(object): the port number from the listen address is used. If AdvertiseAddr is not specified, it will be automatically detected when possible. Default: ``None`` + data_path_addr (string): Address or interface to use for data path + traffic. For example, 192.168.1.1, or an interface, like eth0. Returns: ``True`` if the request went through. @@ -193,11 +235,20 @@ class SwarmApiMixin(object): If the server returns an error. 
""" data = { - "RemoteAddrs": remote_addrs, - "ListenAddr": listen_addr, - "JoinToken": join_token, - "AdvertiseAddr": advertise_addr, + 'RemoteAddrs': remote_addrs, + 'ListenAddr': listen_addr, + 'JoinToken': join_token, + 'AdvertiseAddr': advertise_addr, } + + if data_path_addr is not None: + if utils.version_lt(self._version, '1.30'): + raise errors.InvalidVersion( + 'Data address path is only available for ' + 'API version >= 1.30' + ) + data['DataPathAddr'] = data_path_addr + url = self._url('/swarm/join') response = self._post_json(url, data=data) self._raise_for_status(response) @@ -355,8 +406,10 @@ class SwarmApiMixin(object): return True @utils.minimum_version('1.24') - def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False, - rotate_manager_token=False): + def update_swarm(self, version, swarm_spec=None, + rotate_worker_token=False, + rotate_manager_token=False, + rotate_manager_unlock_key=False): """ Update the Swarm's configuration @@ -370,6 +423,8 @@ class SwarmApiMixin(object): ``False``. rotate_manager_token (bool): Rotate the manager join token. Default: ``False``. + rotate_manager_unlock_key (bool): Rotate the manager unlock key. + Default: ``False``. Returns: ``True`` if the request went through. @@ -378,12 +433,20 @@ class SwarmApiMixin(object): :py:class:`docker.errors.APIError` If the server returns an error. """ - url = self._url('/swarm/update') - response = self._post_json(url, data=swarm_spec, params={ + params = { 'rotateWorkerToken': rotate_worker_token, 'rotateManagerToken': rotate_manager_token, 'version': version - }) + } + if rotate_manager_unlock_key: + if utils.version_lt(self._version, '1.25'): + raise errors.InvalidVersion( + 'Rotate manager unlock key ' + 'is only available for API version >= 1.25' + ) + params['rotateManagerUnlockKey'] = rotate_manager_unlock_key + + response = self._post_json(url, data=swarm_spec, params=params) self._raise_for_status(response) return True diff --git a/docker/auth.py b/docker/auth.py index 9635f93..6a07ea2 100644 --- a/docker/auth.py +++ b/docker/auth.py @@ -2,9 +2,9 @@ import base64 import json import logging -import dockerpycreds import six +from . import credentials from . import errors from .utils import config @@ -39,11 +39,11 @@ def resolve_index_name(index_name): def get_config_header(client, registry): log.debug('Looking for auth config') - if not client._auth_configs: + if not client._auth_configs or client._auth_configs.is_empty: log.debug( "No auth config in memory - loading from filesystem" ) - client._auth_configs = load_config() + client._auth_configs = load_config(credstore_env=client.credstore_env) authcfg = resolve_authconfig( client._auth_configs, registry, credstore_env=client.credstore_env ) @@ -70,81 +70,258 @@ def split_repo_name(repo_name): def get_credential_store(authconfig, registry): - if not registry or registry == INDEX_NAME: - registry = 'https://index.docker.io/v1/' + if not isinstance(authconfig, AuthConfig): + authconfig = AuthConfig(authconfig) + return authconfig.get_credential_store(registry) + + +class AuthConfig(dict): + def __init__(self, dct, credstore_env=None): + if 'auths' not in dct: + dct['auths'] = {} + self.update(dct) + self._credstore_env = credstore_env + self._stores = {} + + @classmethod + def parse_auth(cls, entries, raise_on_error=False): + """ + Parses authentication entries + + Args: + entries: Dict of authentication entries. 
+ raise_on_error: If set to true, an invalid format will raise + InvalidConfigFile + + Returns: + Authentication registry. + """ + + conf = {} + for registry, entry in six.iteritems(entries): + if not isinstance(entry, dict): + log.debug( + 'Config entry for key {0} is not auth config'.format( + registry + ) + ) + # We sometimes fall back to parsing the whole config as if it + # was the auth config by itself, for legacy purposes. In that + # case, we fail silently and return an empty conf if any of the + # keys is not formatted properly. + if raise_on_error: + raise errors.InvalidConfigFile( + 'Invalid configuration for registry {0}'.format( + registry + ) + ) + return {} + if 'identitytoken' in entry: + log.debug( + 'Found an IdentityToken entry for registry {0}'.format( + registry + ) + ) + conf[registry] = { + 'IdentityToken': entry['identitytoken'] + } + continue # Other values are irrelevant if we have a token + + if 'auth' not in entry: + # Starting with engine v1.11 (API 1.23), an empty dictionary is + # a valid value in the auths config. + # https://github.com/docker/compose/issues/3265 + log.debug( + 'Auth data for {0} is absent. Client might be using a ' + 'credentials store instead.'.format(registry) + ) + conf[registry] = {} + continue - return authconfig.get('credHelpers', {}).get(registry) or authconfig.get( - 'credsStore' - ) + username, password = decode_auth(entry['auth']) + log.debug( + 'Found entry (registry={0}, username={1})' + .format(repr(registry), repr(username)) + ) + conf[registry] = { + 'username': username, + 'password': password, + 'email': entry.get('email'), + 'serveraddress': registry, + } + return conf + + @classmethod + def load_config(cls, config_path, config_dict, credstore_env=None): + """ + Loads authentication data from a Docker configuration file in the given + root directory or if config_path is passed use given path. + Lookup priority: + explicit config_path parameter > DOCKER_CONFIG environment + variable > ~/.docker/config.json > ~/.dockercfg + """ + + if not config_dict: + config_file = config.find_config_file(config_path) + + if not config_file: + return cls({}, credstore_env) + try: + with open(config_file) as f: + config_dict = json.load(f) + except (IOError, KeyError, ValueError) as e: + # Likely missing new Docker config file or it's in an + # unknown format, continue to attempt to read old location + # and format. + log.debug(e) + return cls(_load_legacy_config(config_file), credstore_env) + + res = {} + if config_dict.get('auths'): + log.debug("Found 'auths' section") + res.update({ + 'auths': cls.parse_auth( + config_dict.pop('auths'), raise_on_error=True + ) + }) + if config_dict.get('credsStore'): + log.debug("Found 'credsStore' section") + res.update({'credsStore': config_dict.pop('credsStore')}) + if config_dict.get('credHelpers'): + log.debug("Found 'credHelpers' section") + res.update({'credHelpers': config_dict.pop('credHelpers')}) + if res: + return cls(res, credstore_env) -def resolve_authconfig(authconfig, registry=None, credstore_env=None): - """ - Returns the authentication data from the given auth configuration for a - specific registry. As with the Docker client, legacy entries in the config - with full URLs are stripped down to hostnames before checking for a match. - Returns None if no match was found. 
- """ + log.debug( + "Couldn't find auth-related section ; attempting to interpret " + "as auth-only file" + ) + return cls({'auths': cls.parse_auth(config_dict)}, credstore_env) - if 'credHelpers' in authconfig or 'credsStore' in authconfig: - store_name = get_credential_store(authconfig, registry) - if store_name is not None: - log.debug( - 'Using credentials store "{0}"'.format(store_name) - ) - cfg = _resolve_authconfig_credstore( - authconfig, registry, store_name, env=credstore_env - ) - if cfg is not None: - return cfg - log.debug('No entry in credstore - fetching from auth dict') + @property + def auths(self): + return self.get('auths', {}) - # Default to the public index server - registry = resolve_index_name(registry) if registry else INDEX_NAME - log.debug("Looking for auth entry for {0}".format(repr(registry))) + @property + def creds_store(self): + return self.get('credsStore', None) - authdict = authconfig.get('auths', {}) - if registry in authdict: - log.debug("Found {0}".format(repr(registry))) - return authdict[registry] + @property + def cred_helpers(self): + return self.get('credHelpers', {}) - for key, conf in six.iteritems(authdict): - if resolve_index_name(key) == registry: - log.debug("Found {0}".format(repr(key))) - return conf + @property + def is_empty(self): + return ( + not self.auths and not self.creds_store and not self.cred_helpers + ) - log.debug("No entry found") - return None + def resolve_authconfig(self, registry=None): + """ + Returns the authentication data from the given auth configuration for a + specific registry. As with the Docker client, legacy entries in the + config with full URLs are stripped down to hostnames before checking + for a match. Returns None if no match was found. + """ + + if self.creds_store or self.cred_helpers: + store_name = self.get_credential_store(registry) + if store_name is not None: + log.debug( + 'Using credentials store "{0}"'.format(store_name) + ) + cfg = self._resolve_authconfig_credstore(registry, store_name) + if cfg is not None: + return cfg + log.debug('No entry in credstore - fetching from auth dict') + # Default to the public index server + registry = resolve_index_name(registry) if registry else INDEX_NAME + log.debug("Looking for auth entry for {0}".format(repr(registry))) -def _resolve_authconfig_credstore(authconfig, registry, credstore_name, - env=None): - if not registry or registry == INDEX_NAME: - # The ecosystem is a little schizophrenic with index.docker.io VS - # docker.io - in that case, it seems the full URL is necessary. 
- registry = INDEX_URL - log.debug("Looking for auth entry for {0}".format(repr(registry))) - store = dockerpycreds.Store(credstore_name, environment=env) - try: - data = store.get(registry) - res = { - 'ServerAddress': registry, - } - if data['Username'] == TOKEN_USERNAME: - res['IdentityToken'] = data['Secret'] - else: - res.update({ - 'Username': data['Username'], - 'Password': data['Secret'], - }) - return res - except dockerpycreds.CredentialsNotFound as e: - log.debug('No entry found') + if registry in self.auths: + log.debug("Found {0}".format(repr(registry))) + return self.auths[registry] + + for key, conf in six.iteritems(self.auths): + if resolve_index_name(key) == registry: + log.debug("Found {0}".format(repr(key))) + return conf + + log.debug("No entry found") return None - except dockerpycreds.StoreError as e: - raise errors.DockerException( - 'Credentials store error: {0}'.format(repr(e)) - ) + + def _resolve_authconfig_credstore(self, registry, credstore_name): + if not registry or registry == INDEX_NAME: + # The ecosystem is a little schizophrenic with index.docker.io VS + # docker.io - in that case, it seems the full URL is necessary. + registry = INDEX_URL + log.debug("Looking for auth entry for {0}".format(repr(registry))) + store = self._get_store_instance(credstore_name) + try: + data = store.get(registry) + res = { + 'ServerAddress': registry, + } + if data['Username'] == TOKEN_USERNAME: + res['IdentityToken'] = data['Secret'] + else: + res.update({ + 'Username': data['Username'], + 'Password': data['Secret'], + }) + return res + except credentials.CredentialsNotFound: + log.debug('No entry found') + return None + except credentials.StoreError as e: + raise errors.DockerException( + 'Credentials store error: {0}'.format(repr(e)) + ) + + def _get_store_instance(self, name): + if name not in self._stores: + self._stores[name] = credentials.Store( + name, environment=self._credstore_env + ) + return self._stores[name] + + def get_credential_store(self, registry): + if not registry or registry == INDEX_NAME: + registry = INDEX_URL + + return self.cred_helpers.get(registry) or self.creds_store + + def get_all_credentials(self): + auth_data = self.auths.copy() + if self.creds_store: + # Retrieve all credentials from the default store + store = self._get_store_instance(self.creds_store) + for k in store.list().keys(): + auth_data[k] = self._resolve_authconfig_credstore( + k, self.creds_store + ) + auth_data[convert_to_hostname(k)] = auth_data[k] + + # credHelpers entries take priority over all others + for reg, store_name in self.cred_helpers.items(): + auth_data[reg] = self._resolve_authconfig_credstore( + reg, store_name + ) + auth_data[convert_to_hostname(reg)] = auth_data[reg] + + return auth_data + + def add_auth(self, reg, data): + self['auths'][reg] = data + + +def resolve_authconfig(authconfig, registry=None, credstore_env=None): + if not isinstance(authconfig, AuthConfig): + authconfig = AuthConfig(authconfig, credstore_env) + return authconfig.resolve_authconfig(registry) def convert_to_hostname(url): @@ -177,100 +354,11 @@ def parse_auth(entries, raise_on_error=False): Authentication registry. """ - conf = {} - for registry, entry in six.iteritems(entries): - if not isinstance(entry, dict): - log.debug( - 'Config entry for key {0} is not auth config'.format(registry) - ) - # We sometimes fall back to parsing the whole config as if it was - # the auth config by itself, for legacy purposes. 
In that case, we - # fail silently and return an empty conf if any of the keys is not - # formatted properly. - if raise_on_error: - raise errors.InvalidConfigFile( - 'Invalid configuration for registry {0}'.format(registry) - ) - return {} - if 'identitytoken' in entry: - log.debug('Found an IdentityToken entry for registry {0}'.format( - registry - )) - conf[registry] = { - 'IdentityToken': entry['identitytoken'] - } - continue # Other values are irrelevant if we have a token, skip. - - if 'auth' not in entry: - # Starting with engine v1.11 (API 1.23), an empty dictionary is - # a valid value in the auths config. - # https://github.com/docker/compose/issues/3265 - log.debug( - 'Auth data for {0} is absent. Client might be using a ' - 'credentials store instead.'.format(registry) - ) - conf[registry] = {} - continue - - username, password = decode_auth(entry['auth']) - log.debug( - 'Found entry (registry={0}, username={1})' - .format(repr(registry), repr(username)) - ) + return AuthConfig.parse_auth(entries, raise_on_error) - conf[registry] = { - 'username': username, - 'password': password, - 'email': entry.get('email'), - 'serveraddress': registry, - } - return conf - -def load_config(config_path=None, config_dict=None): - """ - Loads authentication data from a Docker configuration file in the given - root directory or if config_path is passed use given path. - Lookup priority: - explicit config_path parameter > DOCKER_CONFIG environment variable > - ~/.docker/config.json > ~/.dockercfg - """ - - if not config_dict: - config_file = config.find_config_file(config_path) - - if not config_file: - return {} - try: - with open(config_file) as f: - config_dict = json.load(f) - except (IOError, KeyError, ValueError) as e: - # Likely missing new Docker config file or it's in an - # unknown format, continue to attempt to read old location - # and format. - log.debug(e) - return _load_legacy_config(config_file) - - res = {} - if config_dict.get('auths'): - log.debug("Found 'auths' section") - res.update({ - 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True) - }) - if config_dict.get('credsStore'): - log.debug("Found 'credsStore' section") - res.update({'credsStore': config_dict.pop('credsStore')}) - if config_dict.get('credHelpers'): - log.debug("Found 'credHelpers' section") - res.update({'credHelpers': config_dict.pop('credHelpers')}) - if res: - return res - - log.debug( - "Couldn't find auth-related section ; attempting to interpret" - "as auth-only file" - ) - return {'auths': parse_auth(config_dict)} +def load_config(config_path=None, config_dict=None, credstore_env=None): + return AuthConfig.load_config(config_path, config_dict, credstore_env) def _load_legacy_config(config_file): diff --git a/docker/client.py b/docker/client.py index 8d4a52b..99ae196 100644 --- a/docker/client.py +++ b/docker/client.py @@ -26,7 +26,7 @@ class DockerClient(object): base_url (str): URL to the Docker server. For example, ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``. version (str): The version of the API to use. Set to ``auto`` to - automatically detect the server's version. Default: ``1.30`` + automatically detect the server's version. Default: ``1.35`` timeout (int): Default timeout for API calls, in seconds. tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass ``True`` to enable it with default options, or pass a @@ -62,7 +62,7 @@ class DockerClient(object): Args: version (str): The version of the API to use. 
Set to ``auto`` to - automatically detect the server's version. Default: ``1.30`` + automatically detect the server's version. Default: ``1.35`` timeout (int): Default timeout for API calls, in seconds. ssl_version (int): A valid `SSL version`_. assert_hostname (bool): Verify the hostname of the server. diff --git a/docker/constants.py b/docker/constants.py index 7565a76..4b96e1c 100644 --- a/docker/constants.py +++ b/docker/constants.py @@ -14,7 +14,17 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \ 'is deprecated and non-functional. Please remove it.' IS_WINDOWS_PLATFORM = (sys.platform == 'win32') +WINDOWS_LONGPATH_PREFIX = '\\\\?\\' DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version) DEFAULT_NUM_POOLS = 25 + +# The OpenSSH server default value for MaxSessions is 10 which means we can +# use up to 9, leaving the final session for the underlying SSH connection. +# For more details see: https://github.com/docker/docker-py/issues/2246 +DEFAULT_NUM_POOLS_SSH = 9 + DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048 + +DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8'] +DEFAULT_SWARM_SUBNET_SIZE = 24 diff --git a/docker/credentials/__init__.py b/docker/credentials/__init__.py new file mode 100644 index 0000000..31ad28e --- /dev/null +++ b/docker/credentials/__init__.py @@ -0,0 +1,4 @@ +# flake8: noqa +from .store import Store +from .errors import StoreError, CredentialsNotFound +from .constants import * diff --git a/docker/credentials/constants.py b/docker/credentials/constants.py new file mode 100644 index 0000000..6a82d8d --- /dev/null +++ b/docker/credentials/constants.py @@ -0,0 +1,4 @@ +PROGRAM_PREFIX = 'docker-credential-' +DEFAULT_LINUX_STORE = 'secretservice' +DEFAULT_OSX_STORE = 'osxkeychain' +DEFAULT_WIN32_STORE = 'wincred' diff --git a/docker/credentials/errors.py b/docker/credentials/errors.py new file mode 100644 index 0000000..42a1bc1 --- /dev/null +++ b/docker/credentials/errors.py @@ -0,0 +1,25 @@ +class StoreError(RuntimeError): + pass + + +class CredentialsNotFound(StoreError): + pass + + +class InitializationError(StoreError): + pass + + +def process_store_error(cpe, program): + message = cpe.output.decode('utf-8') + if 'credentials not found in native keychain' in message: + return CredentialsNotFound( + 'No matching credentials in {}'.format( + program + ) + ) + return StoreError( + 'Credentials store {} exited with "{}".'.format( + program, cpe.output.decode('utf-8').strip() + ) + ) diff --git a/docker/credentials/store.py b/docker/credentials/store.py new file mode 100644 index 0000000..0017888 --- /dev/null +++ b/docker/credentials/store.py @@ -0,0 +1,107 @@ +import errno +import json +import subprocess + +import six + +from . import constants +from . import errors +from .utils import create_environment_dict +from .utils import find_executable + + +class Store(object): + def __init__(self, program, environment=None): + """ Create a store object that acts as an interface to + perform the basic operations for storing, retrieving + and erasing credentials using `program`. + """ + self.program = constants.PROGRAM_PREFIX + program + self.exe = find_executable(self.program) + self.environment = environment + if self.exe is None: + raise errors.InitializationError( + '{} not installed or not available in PATH'.format( + self.program + ) + ) + + def get(self, server): + """ Retrieve credentials for `server`. If no credentials are found, + a `StoreError` will be raised. 
+ """ + if not isinstance(server, six.binary_type): + server = server.encode('utf-8') + data = self._execute('get', server) + result = json.loads(data.decode('utf-8')) + + # docker-credential-pass will return an object for inexistent servers + # whereas other helpers will exit with returncode != 0. For + # consistency, if no significant data is returned, + # raise CredentialsNotFound + if result['Username'] == '' and result['Secret'] == '': + raise errors.CredentialsNotFound( + 'No matching credentials in {}'.format(self.program) + ) + + return result + + def store(self, server, username, secret): + """ Store credentials for `server`. Raises a `StoreError` if an error + occurs. + """ + data_input = json.dumps({ + 'ServerURL': server, + 'Username': username, + 'Secret': secret + }).encode('utf-8') + return self._execute('store', data_input) + + def erase(self, server): + """ Erase credentials for `server`. Raises a `StoreError` if an error + occurs. + """ + if not isinstance(server, six.binary_type): + server = server.encode('utf-8') + self._execute('erase', server) + + def list(self): + """ List stored credentials. Requires v0.4.0+ of the helper. + """ + data = self._execute('list', None) + return json.loads(data.decode('utf-8')) + + def _execute(self, subcmd, data_input): + output = None + env = create_environment_dict(self.environment) + try: + if six.PY3: + output = subprocess.check_output( + [self.exe, subcmd], input=data_input, env=env, + ) + else: + process = subprocess.Popen( + [self.exe, subcmd], stdin=subprocess.PIPE, + stdout=subprocess.PIPE, env=env, + ) + output, _ = process.communicate(data_input) + if process.returncode != 0: + raise subprocess.CalledProcessError( + returncode=process.returncode, cmd='', output=output + ) + except subprocess.CalledProcessError as e: + raise errors.process_store_error(e, self.program) + except OSError as e: + if e.errno == errno.ENOENT: + raise errors.StoreError( + '{} not installed or not available in PATH'.format( + self.program + ) + ) + else: + raise errors.StoreError( + 'Unexpected OS error "{}", errno={}'.format( + e.strerror, e.errno + ) + ) + return output diff --git a/docker/credentials/utils.py b/docker/credentials/utils.py new file mode 100644 index 0000000..3f720ef --- /dev/null +++ b/docker/credentials/utils.py @@ -0,0 +1,38 @@ +import distutils.spawn +import os +import sys + + +def find_executable(executable, path=None): + """ + As distutils.spawn.find_executable, but on Windows, look up + every extension declared in PATHEXT instead of just `.exe` + """ + if sys.platform != 'win32': + return distutils.spawn.find_executable(executable, path) + + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep) + base, ext = os.path.splitext(executable) + + if not os.path.isfile(executable): + for p in paths: + for ext in extensions: + f = os.path.join(p, base + ext) + if os.path.isfile(f): + return f + return None + else: + return executable + + +def create_environment_dict(overrides): + """ + Create and return a copy of os.environ with the specified overrides + """ + result = os.environ.copy() + result.update(overrides or {}) + return result diff --git a/docker/errors.py b/docker/errors.py index 0253695..c340dcb 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -63,6 +63,9 @@ class APIError(requests.exceptions.HTTPError, DockerException): if self.response is not None: return self.response.status_code + def is_error(self): + return 
self.is_client_error() or self.is_server_error() + def is_client_error(self): if self.status_code is None: return False diff --git a/docker/models/containers.py b/docker/models/containers.py index b33a718..d1f275f 100644 --- a/docker/models/containers.py +++ b/docker/models/containers.py @@ -15,7 +15,12 @@ from .resource import Collection, Model class Container(Model): - + """ Local representation of a container object. Detailed configuration may + be accessed through the :py:attr:`attrs` attribute. Note that local + attributes are cached; users may call :py:meth:`reload` to + query the Docker daemon for the current properties, causing + :py:attr:`attrs` to be refreshed. + """ @property def name(self): """ @@ -57,6 +62,13 @@ class Container(Model): return self.attrs['State']['Status'] return self.attrs['State'] + @property + def ports(self): + """ + The ports that the container exposes as a dictionary. + """ + return self.attrs.get('NetworkSettings', {}).get('Ports', {}) + def attach(self, **kwargs): """ Attach to this container. @@ -139,7 +151,7 @@ class Container(Model): def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False, privileged=False, user='', detach=False, stream=False, - socket=False, environment=None, workdir=None): + socket=False, environment=None, workdir=None, demux=False): """ Run a command inside this container. Similar to ``docker exec``. @@ -161,16 +173,18 @@ class Container(Model): the following format ``["PASSWORD=xxx"]`` or ``{"PASSWORD": "xxx"}``. workdir (str): Path to working directory for this exec session + demux (bool): Return stdout and stderr separately Returns: (ExecResult): A tuple of (exit_code, output) exit_code: (int): Exit code for the executed command or ``None`` if - either ``stream```or ``socket`` is ``True``. - output: (generator or str): + either ``stream`` or ``socket`` is ``True``. + output: (generator, bytes, or tuple): If ``stream=True``, a generator yielding response chunks. If ``socket=True``, a socket object for the connection. - A string containing response data otherwise. + If ``demux=True``, a tuple of two bytes: stdout and stderr. + A bytestring containing response data otherwise. Raises: :py:class:`docker.errors.APIError` @@ -179,10 +193,11 @@ class Container(Model): resp = self.client.api.exec_create( self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty, privileged=privileged, user=user, environment=environment, - workdir=workdir + workdir=workdir, ) exec_output = self.client.api.exec_start( - resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket + resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket, + demux=demux ) if socket or stream: return ExecResult(None, exec_output) @@ -228,6 +243,17 @@ class Container(Model): Raises: :py:class:`docker.errors.APIError` If the server returns an error. + + Example: + + >>> f = open('./sh_bin.tar', 'wb') + >>> bits, stat = container.get_archive('/bin/sh') + >>> print(stat) + {'name': 'sh', 'size': 1075464, 'mode': 493, + 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} + >>> for chunk in bits: + ... f.write(chunk) + >>> f.close() """ return self.client.api.get_archive(self.id, path, chunk_size) @@ -253,16 +279,16 @@ class Container(Model): generator you can iterate over to retrieve log output as it happens. Args: - stdout (bool): Get ``STDOUT`` - stderr (bool): Get ``STDERR`` - stream (bool): Stream the response - timestamps (bool): Show timestamps + stdout (bool): Get ``STDOUT``. Default ``True`` + stderr (bool): Get ``STDERR``. 
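A quick sketch of the new ``demux`` flag (assuming ``container`` is a running :py:class:`Container`): with ``demux=True`` the output becomes a ``(stdout, stderr)`` tuple of bytestrings instead of a single multiplexed bytestring.

    exit_code, output = container.exec_run(
        "sh -c 'echo to-stdout; echo to-stderr >&2'", demux=True
    )
    stdout, stderr = output
    # stdout == b'to-stdout\n', stderr == b'to-stderr\n'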
Default ``True`` + stream (bool): Stream the response. Default ``False`` + timestamps (bool): Show timestamps. Default ``False`` tail (str or int): Output specified number of lines at the end of logs. Either an integer of number of lines or the string ``all``. Default ``all`` since (datetime or int): Show logs since a given datetime or integer epoch (in seconds) - follow (bool): Follow log output + follow (bool): Follow log output. Default ``False`` until (datetime or int): Show logs that occurred before the given datetime or integer epoch (in seconds) @@ -380,7 +406,8 @@ class Container(Model): Args: decode (bool): If set to true, stream will be decoded into dicts - on the fly. False by default. + on the fly. Only applicable if ``stream`` is True. + False by default. stream (bool): If set to false, only the current stats will be returned instead of a stream. True by default. @@ -521,12 +548,15 @@ class ContainerCollection(Collection): cap_add (list of str): Add kernel capabilities. For example, ``["SYS_ADMIN", "MKNOD"]``. cap_drop (list of str): Drop kernel capabilities. + cgroup_parent (str): Override the default parent cgroup. cpu_count (int): Number of usable CPUs (Windows only). cpu_percent (int): Usable percentage of the available CPUs (Windows only). cpu_period (int): The length of a CPU period in microseconds. cpu_quota (int): Microseconds of CPU time that the container can get in a CPU period. + cpu_rt_period (int): Limit CPU real-time period in microseconds. + cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds. cpu_shares (int): CPU shares (relative weight). cpuset_cpus (str): CPUs in which to allow execution (``0-3``, ``0,1``). @@ -558,7 +588,7 @@ class ContainerCollection(Collection): environment (dict or list): Environment variables to set inside the container, as a dictionary or a list of strings in the format ``["SOMEVARIABLE=xxx"]``. - extra_hosts (dict): Addtional hostnames to resolve inside the + extra_hosts (dict): Additional hostnames to resolve inside the container, as a mapping of hostname to IP address. group_add (:py:class:`list`): List of additional group names and/or IDs that the container process will run as. @@ -570,19 +600,17 @@ class ContainerCollection(Collection): init_path (str): Path to the docker-init binary ipc_mode (str): Set the IPC mode for the container. isolation (str): Isolation technology to use. Default: `None`. + kernel_memory (int or str): Kernel memory limit labels (dict or list): A dictionary of name-value labels (e.g. ``{"label1": "value1", "label2": "value2"}``) or a list of names of labels to set with empty values (e.g. ``["label1", "label2"]``) - links (dict or list of tuples): Either a dictionary mapping name - to alias or as a list of ``(name, alias)`` tuples. - log_config (dict): Logging configuration, as a dictionary with - keys: - - - ``type`` The logging driver name. - - ``config`` A dictionary of configuration for the logging - driver. - + links (dict): Mapping of links using the + ``{'container': 'alias'}`` format. The alias is optional. + Containers declared in this dict will be linked to the new + container using the provided alias. Default: ``None``. + log_config (LogConfig): Logging configuration. + lxc_conf (dict): LXC config. mac_address (str): MAC address to assign to the container. mem_limit (int or str): Memory limit. Accepts float values (which represent the memory limit of the created container in @@ -590,6 +618,7 @@ class ContainerCollection(Collection): (``100000b``, ``1000k``, ``128m``, ``1g``). 
If a string is specified without a units character, bytes are assumed as an intended unit. + mem_reservation (int or str): Memory soft limit. mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number between 0 and 100. memswap_limit (str or int): Maximum amount of memory + swap a @@ -628,8 +657,8 @@ class ContainerCollection(Collection): The keys of the dictionary are the ports to bind inside the container, either as an integer or a string in the form - ``port/protocol``, where the protocol is either ``tcp`` or - ``udp``. + ``port/protocol``, where the protocol is either ``tcp``, + ``udp``, or ``sctp``. The values of the dictionary are the corresponding ports to open on the host, which can be either: @@ -662,6 +691,7 @@ class ContainerCollection(Collection): For example: ``{"Name": "on-failure", "MaximumRetryCount": 5}`` + runtime (str): Runtime to use with this container. security_opt (:py:class:`list`): A list of string values to customize labels for MLS systems, such as SELinux. shm_size (str or int): Size of /dev/shm (e.g. ``1G``). @@ -691,13 +721,21 @@ class ContainerCollection(Collection): } tty (bool): Allocate a pseudo-TTY. - ulimits (:py:class:`list`): Ulimits to set inside the container, as - a list of dicts. + ulimits (:py:class:`list`): Ulimits to set inside the container, + as a list of :py:class:`docker.types.Ulimit` instances. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being built. user (str or int): Username or UID to run commands as inside the container. userns_mode (str): Sets the user namespace mode for the container when user namespace remapping option is enabled. Supported values are: ``host`` + uts_mode (str): Sets the UTS namespace mode for the container. + Supported values are: ``host`` + version (str): The version of the API to use. Set to ``auto`` to + automatically detect the server's version. Default: ``1.35`` volume_driver (str): The name of a volume driver/plugin. volumes (dict or list): A dictionary to configure volumes mounted inside the container. The key is either the host path or a @@ -717,7 +755,6 @@ class ContainerCollection(Collection): volumes_from (:py:class:`list`): List of container names or IDs to get volumes from. working_dir (str): Path to the working directory. - runtime (str): Runtime to use with this container. Returns: The container logs, either ``STDOUT``, ``STDERR``, or both, @@ -863,7 +900,8 @@ class ContainerCollection(Collection): - `exited` (int): Only containers with specified exit code - `status` (str): One of ``restarting``, ``running``, ``paused``, ``exited`` - - `label` (str): format either ``"key"`` or ``"key=value"`` + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. - `id` (str): The id of the container. - `name` (str): The name of the container. - `ancestor` (str): Filter by container ancestor. 
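A small sketch of the extended ``label`` filter (assuming ``client`` is a connected :py:class:`DockerClient`; the labels are placeholders): a list of ``key=value`` strings is now accepted in addition to a single string.

    matching = client.containers.list(
        filters={'label': ['tier=web', 'env=prod'], 'status': 'running'}
    )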
Format of @@ -932,8 +970,8 @@ RUN_CREATE_KWARGS = [ 'stdin_open', 'stop_signal', 'tty', + 'use_config_proxy', 'user', - 'volume_driver', 'working_dir', ] @@ -995,7 +1033,9 @@ RUN_HOST_CONFIG_KWARGS = [ 'tmpfs', 'ulimits', 'userns_mode', + 'uts_mode', 'version', + 'volume_driver', 'volumes_from', 'runtime' ] diff --git a/docker/models/images.py b/docker/models/images.py index 41632c6..757a5a4 100644 --- a/docker/models/images.py +++ b/docker/models/images.py @@ -1,5 +1,6 @@ import itertools import re +import warnings import six @@ -59,14 +60,20 @@ class Image(Model): """ return self.client.api.history(self.id) - def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False): """ Get a tarball of an image. Similar to the ``docker save`` command. Args: - chunk_size (int): The number of bytes returned by each iteration - of the generator. If ``None``, data will be streamed as it is - received. Default: 2 MB + chunk_size (int): The generator will return up to that much data + per iteration, but may return less. If ``None``, data will be + streamed as it is received. Default: 2 MB + named (str or bool): If ``False`` (default), the tarball will not + retain repository and tag information for this image. If set + to ``True``, the first tag in the :py:attr:`~tags` list will + be used to identify the image. Alternatively, any element of + the :py:attr:`~tags` list can be used as an argument to use + that specific tag as the saved identifier. Returns: (generator): A stream of raw archive data. @@ -78,12 +85,22 @@ class Image(Model): Example: >>> image = cli.get_image("busybox:latest") - >>> f = open('/tmp/busybox-latest.tar', 'w') + >>> f = open('/tmp/busybox-latest.tar', 'wb') >>> for chunk in image: >>> f.write(chunk) >>> f.close() """ - return self.client.api.get_image(self.id, chunk_size) + img = self.id + if named: + img = self.tags[0] if self.tags else img + if isinstance(named, six.string_types): + if named not in self.tags: + raise InvalidArgument( + "{} is not a valid tag for this image".format(named) + ) + img = named + + return self.client.api.get_image(img, chunk_size) def tag(self, repository, tag=None, **kwargs): """ @@ -241,6 +258,10 @@ class ImageCollection(Collection): platform (str): Platform in the format ``os[/arch[/variant]]``. isolation (str): Isolation technology used during build. Default: `None`. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being built. Returns: (tuple): The first item is the :py:class:`Image` object for the @@ -294,22 +315,26 @@ class ImageCollection(Collection): """ return self.prepare_model(self.client.api.inspect_image(name)) - def get_registry_data(self, name): + def get_registry_data(self, name, auth_config=None): """ Gets the registry data for an image. Args: name (str): The name of the image. + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. Returns: (:py:class:`RegistryData`): The data object. + Raises: :py:class:`docker.errors.APIError` If the server returns an error. 
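A usage sketch for the new ``named`` argument (assuming ``client`` is a connected :py:class:`DockerClient` and ``busybox:latest`` is present locally): with ``named=True`` the tarball retains the repository and tag, so ``docker load`` restores them.

    image = client.images.get('busybox:latest')
    with open('/tmp/busybox-latest.tar', 'wb') as f:
        for chunk in image.save(named=True):
            f.write(chunk)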
""" return RegistryData( image_name=name, - attrs=self.client.api.inspect_distribution(name), + attrs=self.client.api.inspect_distribution(name, auth_config), client=self.client, collection=self, ) @@ -325,7 +350,8 @@ class ImageCollection(Collection): filters (dict): Filters to be processed on the image list. Available filters: - ``dangling`` (bool) - - ``label`` (str): format either ``key`` or ``key=value`` + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. Returns: (list of :py:class:`Image`): The images. @@ -383,10 +409,9 @@ class ImageCollection(Collection): Args: repository (str): The repository to pull tag (str): The tag to pull - auth_config (dict): Override the credentials that - :py:meth:`~docker.client.DockerClient.login` has set for - this request. ``auth_config`` should contain the ``username`` - and ``password`` keys to be valid. + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. platform (str): Platform in the format ``os[/arch[/variant]]`` Returns: @@ -409,7 +434,21 @@ class ImageCollection(Collection): if not tag: repository, tag = parse_repository_tag(repository) - self.client.api.pull(repository, tag=tag, **kwargs) + if 'stream' in kwargs: + warnings.warn( + '`stream` is not a valid parameter for this method' + ' and will be overridden' + ) + del kwargs['stream'] + + pull_log = self.client.api.pull( + repository, tag=tag, stream=True, **kwargs + ) + for _ in pull_log: + # We don't do anything with the logs, but we need + # to keep the connection alive and wait for the image + # to be pulled. + pass if tag: return self.get('{0}{2}{1}'.format( repository, tag, '@' if tag.startswith('sha256:') else ':' diff --git a/docker/models/networks.py b/docker/models/networks.py index be3291a..f944c8e 100644 --- a/docker/models/networks.py +++ b/docker/models/networks.py @@ -190,7 +190,8 @@ class NetworkCollection(Collection): filters (dict): Filters to be processed on the network list. Available filters: - ``driver=[<driver-name>]`` Matches a network's driver. - - ``label=[<key>]`` or ``label=[<key>=<value>]``. + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. - ``type=["custom"|"builtin"]`` Filters networks by type. greedy (bool): Fetch more details for each network individually. You might want this to get the containers attached to them. diff --git a/docker/models/services.py b/docker/models/services.py index 458d2c8..a35687b 100644 --- a/docker/models/services.py +++ b/docker/models/services.py @@ -1,6 +1,6 @@ import copy from docker.errors import create_unexpected_kwargs_error, InvalidArgument -from docker.types import TaskTemplate, ContainerSpec, ServiceMode +from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode from .resource import Model, Collection @@ -42,7 +42,7 @@ class Service(Model): ``label``, and ``desired-state``. Returns: - (:py:class:`list`): List of task dictionaries. + :py:class:`list`: List of task dictionaries. Raises: :py:class:`docker.errors.APIError` @@ -84,26 +84,27 @@ class Service(Model): def logs(self, **kwargs): """ - Get log stream for the service. - Note: This method works only for services with the ``json-file`` - or ``journald`` logging drivers. - - Args: - details (bool): Show extra details provided to logs. - Default: ``False`` - follow (bool): Keep connection open to read logs as they are - sent by the Engine. 
Default: ``False`` - stdout (bool): Return logs from ``stdout``. Default: ``False`` - stderr (bool): Return logs from ``stderr``. Default: ``False`` - since (int): UNIX timestamp for the logs staring point. - Default: 0 - timestamps (bool): Add timestamps to every log line. - tail (string or int): Number of log lines to be returned, - counting from the current end of the logs. Specify an - integer or ``'all'`` to output all log lines. - Default: ``all`` - - Returns (generator): Logs for the service. + Get log stream for the service. + Note: This method works only for services with the ``json-file`` + or ``journald`` logging drivers. + + Args: + details (bool): Show extra details provided to logs. + Default: ``False`` + follow (bool): Keep connection open to read logs as they are + sent by the Engine. Default: ``False`` + stdout (bool): Return logs from ``stdout``. Default: ``False`` + stderr (bool): Return logs from ``stderr``. Default: ``False`` + since (int): UNIX timestamp for the logs staring point. + Default: 0 + timestamps (bool): Add timestamps to every log line. + tail (string or int): Number of log lines to be returned, + counting from the current end of the logs. Specify an + integer or ``'all'`` to output all log lines. + Default: ``all`` + + Returns: + generator: Logs for the service. """ is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get( 'TTY', False @@ -118,7 +119,7 @@ class Service(Model): replicas (int): The number of containers that should be running. Returns: - ``True``if successful. + bool: ``True`` if successful. """ if 'Global' in self.attrs['Spec']['Mode'].keys(): @@ -134,7 +135,7 @@ class Service(Model): Force update the service even if no changes require it. Returns: - ``True``if successful. + bool: ``True`` if successful. """ return self.update(force_update=True, fetch_current_spec=True) @@ -152,13 +153,20 @@ class ServiceCollection(Collection): image (str): The image name to use for the containers. command (list of str or str): Command to run. args (list of str): Arguments to the command. - constraints (list of str): Placement constraints. + constraints (list of str): :py:class:`~docker.types.Placement` + constraints. + preferences (list of tuple): :py:class:`~docker.types.Placement` + preferences. + platforms (list of tuple): A list of platform constraints + expressed as ``(arch, os)`` tuples. container_labels (dict): Labels to apply to the container. endpoint_spec (EndpointSpec): Properties that can be configured to access and load balance a service. Default: ``None``. env (list of str): Environment variables, in the form ``KEY=val``. hostname (string): Hostname to set on the container. + init (boolean): Run an init inside the container that forwards + signals and reaps processes isolation (string): Isolation technology used by the service's containers. Only used for Windows containers. labels (dict): Labels to apply to the service. @@ -170,16 +178,19 @@ class ServiceCollection(Collection): ``source:target:options``, where options is either ``ro`` or ``rw``. name (str): Name to give to the service. - networks (list of str): List of network names or IDs to attach - the service to. Default: ``None``. + networks (:py:class:`list`): List of network names or IDs or + :py:class:`~docker.types.NetworkAttachmentConfig` to attach the + service to. Default: ``None``. resources (Resources): Resource limits and reservations. restart_policy (RestartPolicy): Restart policy for containers. 
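A sketch of the new placement options (assumptions: ``client`` is connected to a swarm manager and a ``backend`` network already exists): preferences are passed as ``(strategy, descriptor)`` tuples and converted to :py:class:`~docker.types.PlacementPreference` internally.

    service = client.services.create(
        'nginx:alpine',
        name='web',
        constraints=['node.role == worker'],
        preferences=[('spread', 'node.labels.datacenter')],
        networks=['backend'],
        init=True,
    )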
- secrets (list of :py:class:`docker.types.SecretReference`): List + secrets (list of :py:class:`~docker.types.SecretReference`): List of secrets accessible to containers for this service. stop_grace_period (int): Amount of time to wait for containers to terminate before forcefully killing them. update_config (UpdateConfig): Specification for the update strategy of the service. Default: ``None`` + rollback_config (RollbackConfig): Specification for the rollback + strategy of the service. Default: ``None`` user (str): User to run commands as. workdir (str): Working directory for commands to run. tty (boolean): Whether a pseudo-TTY should be allocated. @@ -195,13 +206,14 @@ class ServiceCollection(Collection): the container's `hosts` file. dns_config (DNSConfig): Specification for DNS related configurations in resolver configuration file. - configs (:py:class:`list`): List of :py:class:`ConfigReference` - that will be exposed to the service. + configs (:py:class:`list`): List of + :py:class:`~docker.types.ConfigReference` that will be exposed + to the service. privileges (Privileges): Security options for the service's containers. Returns: - (:py:class:`Service`) The created service. + :py:class:`Service`: The created service. Raises: :py:class:`docker.errors.APIError` @@ -223,7 +235,7 @@ class ServiceCollection(Collection): into the output. Returns: - (:py:class:`Service`): The service. + :py:class:`Service`: The service. Raises: :py:class:`docker.errors.NotFound` @@ -248,7 +260,7 @@ class ServiceCollection(Collection): Default: ``None``. Returns: - (list of :py:class:`Service`): The services. + list of :py:class:`Service`: The services. Raises: :py:class:`docker.errors.APIError` @@ -272,6 +284,7 @@ CONTAINER_SPEC_KWARGS = [ 'hostname', 'hosts', 'image', + 'init', 'isolation', 'labels', 'mounts', @@ -302,6 +315,12 @@ CREATE_SERVICE_KWARGS = [ 'endpoint_spec', ] +PLACEMENT_KWARGS = [ + 'constraints', + 'preferences', + 'platforms', +] + def _get_create_service_kwargs(func_name, kwargs): # Copy over things which can be copied directly @@ -321,10 +340,12 @@ def _get_create_service_kwargs(func_name, kwargs): if 'container_labels' in kwargs: container_spec_kwargs['labels'] = kwargs.pop('container_labels') - if 'constraints' in kwargs: - task_template_kwargs['placement'] = { - 'Constraints': kwargs.pop('constraints') - } + placement = {} + for key in copy.copy(kwargs): + if key in PLACEMENT_KWARGS: + placement[key] = kwargs.pop(key) + placement = Placement(**placement) + task_template_kwargs['placement'] = placement if 'log_driver' in kwargs: task_template_kwargs['log_driver'] = { diff --git a/docker/models/swarm.py b/docker/models/swarm.py index 7396e73..755c17d 100644 --- a/docker/models/swarm.py +++ b/docker/models/swarm.py @@ -34,7 +34,8 @@ class Swarm(Model): get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377', - force_new_cluster=False, **kwargs): + force_new_cluster=False, default_addr_pool=None, + subnet_size=None, data_path_addr=None, **kwargs): """ Initialize a new swarm on this Engine. @@ -56,6 +57,14 @@ class Swarm(Model): is used. Default: ``0.0.0.0:2377`` force_new_cluster (bool): Force creating a new Swarm, even if already part of one. Default: False + default_addr_pool (list of str): Default Address Pool specifies + default subnet pools for global scope networks. Each pool + should be specified as a CIDR block, like '10.0.0.0/8'. 
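A sketch of the extended ``init()`` signature (assuming ``client`` is a connected :py:class:`DockerClient` on a node not yet part of a swarm): the address pool and subnet size are optional, and the call now returns the ID of the created node.

    node_id = client.swarm.init(
        advertise_addr='eth0',
        default_addr_pool=['10.20.0.0/16'],
        subnet_size=24,
    )
    print(node_id)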
+ Default: None + subnet_size (int): SubnetSize specifies the subnet size of the + networks created from the default subnet pool. Default: None + data_path_addr (string): Address or interface to use for data path + traffic. For example, 192.168.1.1, or an interface, like eth0. task_history_retention_limit (int): Maximum number of tasks history stored. snapshot_interval (int): Number of logs entries between snapshot. @@ -89,7 +98,7 @@ class Swarm(Model): created in the orchestrator. Returns: - ``True`` if the request went through. + (str): The ID of the created node. Raises: :py:class:`docker.errors.APIError` @@ -99,7 +108,8 @@ class Swarm(Model): >>> client.swarm.init( advertise_addr='eth0', listen_addr='0.0.0.0:5000', - force_new_cluster=False, snapshot_interval=5000, + force_new_cluster=False, default_addr_pool=['10.20.0.0/16], + subnet_size=24, snapshot_interval=5000, log_entries_for_slow_followers=1200 ) @@ -107,11 +117,15 @@ class Swarm(Model): init_kwargs = { 'advertise_addr': advertise_addr, 'listen_addr': listen_addr, - 'force_new_cluster': force_new_cluster + 'force_new_cluster': force_new_cluster, + 'default_addr_pool': default_addr_pool, + 'subnet_size': subnet_size, + 'data_path_addr': data_path_addr, } init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs) - self.client.api.init_swarm(**init_kwargs) + node_id = self.client.api.init_swarm(**init_kwargs) self.reload() + return node_id def join(self, *args, **kwargs): return self.client.api.join_swarm(*args, **kwargs) @@ -137,7 +151,7 @@ class Swarm(Model): unlock.__doc__ = APIClient.unlock_swarm.__doc__ def update(self, rotate_worker_token=False, rotate_manager_token=False, - **kwargs): + rotate_manager_unlock_key=False, **kwargs): """ Update the swarm's configuration. @@ -150,7 +164,8 @@ class Swarm(Model): ``False``. rotate_manager_token (bool): Rotate the manager join token. Default: ``False``. - + rotate_manager_unlock_key (bool): Rotate the manager unlock key. + Default: ``False``. Raises: :py:class:`docker.errors.APIError` If the server returns an error. @@ -164,5 +179,6 @@ class Swarm(Model): version=self.version, swarm_spec=self.client.api.create_swarm_spec(**kwargs), rotate_worker_token=rotate_worker_token, - rotate_manager_token=rotate_manager_token + rotate_manager_token=rotate_manager_token, + rotate_manager_unlock_key=rotate_manager_unlock_key ) diff --git a/docker/tls.py b/docker/tls.py index 4900e9f..d4671d1 100644 --- a/docker/tls.py +++ b/docker/tls.py @@ -2,7 +2,7 @@ import os import ssl from . 
import errors -from .transport import SSLAdapter +from .transport import SSLHTTPAdapter class TLSConfig(object): @@ -105,7 +105,7 @@ class TLSConfig(object): if self.cert: client.cert = self.cert - client.mount('https://', SSLAdapter( + client.mount('https://', SSLHTTPAdapter( ssl_version=self.ssl_version, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py index abbee18..e37fc3b 100644 --- a/docker/transport/__init__.py +++ b/docker/transport/__init__.py @@ -1,8 +1,13 @@ # flake8: noqa -from .unixconn import UnixAdapter -from .ssladapter import SSLAdapter +from .unixconn import UnixHTTPAdapter +from .ssladapter import SSLHTTPAdapter try: - from .npipeconn import NpipeAdapter + from .npipeconn import NpipeHTTPAdapter from .npipesocket import NpipeSocket except ImportError: pass + +try: + from .sshconn import SSHHTTPAdapter +except ImportError: + pass diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py new file mode 100644 index 0000000..4d819b6 --- /dev/null +++ b/docker/transport/basehttpadapter.py @@ -0,0 +1,8 @@ +import requests.adapters + + +class BaseHTTPAdapter(requests.adapters.HTTPAdapter): + def close(self): + super(BaseHTTPAdapter, self).close() + if hasattr(self, 'pools'): + self.pools.clear() diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py index ab9b904..aa05538 100644 --- a/docker/transport/npipeconn.py +++ b/docker/transport/npipeconn.py @@ -1,6 +1,7 @@ import six import requests.adapters +from docker.transport.basehttpadapter import BaseHTTPAdapter from .. import constants from .npipesocket import NpipeSocket @@ -68,7 +69,7 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): return conn or self._new_conn() -class NpipeAdapter(requests.adapters.HTTPAdapter): +class NpipeHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path', 'pools', @@ -81,7 +82,7 @@ class NpipeAdapter(requests.adapters.HTTPAdapter): self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) - super(NpipeAdapter, self).__init__() + super(NpipeHTTPAdapter, self).__init__() def get_connection(self, url, proxies=None): with self.pools.lock: @@ -103,6 +104,3 @@ class NpipeAdapter(requests.adapters.HTTPAdapter): # anyway, we simply return the path URL directly. # See also: https://github.com/docker/docker-sdk-python/issues/811 return request.path_url - - def close(self): - self.pools.clear() diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py index c04b39d..ef02031 100644 --- a/docker/transport/npipesocket.py +++ b/docker/transport/npipesocket.py @@ -87,10 +87,6 @@ class NpipeSocket(object): def dup(self): return NpipeSocket(self._handle) - @check_closed - def fileno(self): - return int(self._handle) - def getpeername(self): return self._address diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py new file mode 100644 index 0000000..5a8ceb0 --- /dev/null +++ b/docker/transport/sshconn.py @@ -0,0 +1,116 @@ +import paramiko +import requests.adapters +import six + +from docker.transport.basehttpadapter import BaseHTTPAdapter +from .. 
import constants + +if six.PY3: + import http.client as httplib +else: + import httplib + +try: + import requests.packages.urllib3 as urllib3 +except ImportError: + import urllib3 + +RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer + + +class SSHConnection(httplib.HTTPConnection, object): + def __init__(self, ssh_transport, timeout=60): + super(SSHConnection, self).__init__( + 'localhost', timeout=timeout + ) + self.ssh_transport = ssh_transport + self.timeout = timeout + + def connect(self): + sock = self.ssh_transport.open_session() + sock.settimeout(self.timeout) + sock.exec_command('docker system dial-stdio') + self.sock = sock + + +class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): + scheme = 'ssh' + + def __init__(self, ssh_client, timeout=60, maxsize=10): + super(SSHConnectionPool, self).__init__( + 'localhost', timeout=timeout, maxsize=maxsize + ) + self.ssh_transport = ssh_client.get_transport() + self.timeout = timeout + + def _new_conn(self): + return SSHConnection(self.ssh_transport, self.timeout) + + # When re-using connections, urllib3 calls fileno() on our + # SSH channel instance, quickly overloading our fd limit. To avoid this, + # we override _get_conn + def _get_conn(self, timeout): + conn = None + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError: # self.pool is None + raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") + + except six.moves.queue.Empty: + if self.block: + raise urllib3.exceptions.EmptyPoolError( + self, + "Pool reached maximum size and no more " + "connections are allowed." + ) + pass # Oh well, we'll create a new connection then + + return conn or self._new_conn() + + +class SSHHTTPAdapter(BaseHTTPAdapter): + + __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [ + 'pools', 'timeout', 'ssh_client', + ] + + def __init__(self, base_url, timeout=60, + pool_connections=constants.DEFAULT_NUM_POOLS): + self.ssh_client = paramiko.SSHClient() + self.ssh_client.load_system_host_keys() + + self.base_url = base_url + self._connect() + self.timeout = timeout + self.pools = RecentlyUsedContainer( + pool_connections, dispose_func=lambda p: p.close() + ) + super(SSHHTTPAdapter, self).__init__() + + def _connect(self): + parsed = six.moves.urllib_parse.urlparse(self.base_url) + self.ssh_client.connect( + parsed.hostname, parsed.port, parsed.username, + ) + + def get_connection(self, url, proxies=None): + with self.pools.lock: + pool = self.pools.get(url) + if pool: + return pool + + # Connection is closed try a reconnect + if not self.ssh_client.get_transport(): + self._connect() + + pool = SSHConnectionPool( + self.ssh_client, self.timeout + ) + self.pools[url] = pool + + return pool + + def close(self): + super(SSHHTTPAdapter, self).close() + self.ssh_client.close() diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py index 8fafec3..12de76c 100644 --- a/docker/transport/ssladapter.py +++ b/docker/transport/ssladapter.py @@ -7,6 +7,8 @@ import sys from distutils.version import StrictVersion from requests.adapters import HTTPAdapter +from docker.transport.basehttpadapter import BaseHTTPAdapter + try: import requests.packages.urllib3 as urllib3 except ImportError: @@ -22,7 +24,7 @@ if sys.version_info[0] < 3 or sys.version_info[1] < 5: urllib3.connection.match_hostname = match_hostname -class SSLAdapter(HTTPAdapter): +class SSLHTTPAdapter(BaseHTTPAdapter): '''An HTTPS Transport Adapter that uses an arbitrary SSL version.''' __attrs__ = 
HTTPAdapter.__attrs__ + ['assert_fingerprint', @@ -34,7 +36,7 @@ class SSLAdapter(HTTPAdapter): self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint - super(SSLAdapter, self).__init__(**kwargs) + super(SSLHTTPAdapter, self).__init__(**kwargs) def init_poolmanager(self, connections, maxsize, block=False): kwargs = { @@ -57,7 +59,7 @@ class SSLAdapter(HTTPAdapter): But we still need to take care of when there is a proxy poolmanager """ - conn = super(SSLAdapter, self).get_connection(*args, **kwargs) + conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs) if conn.assert_hostname != self.assert_hostname: conn.assert_hostname = self.assert_hostname return conn diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py index c59821a..b619103 100644 --- a/docker/transport/unixconn.py +++ b/docker/transport/unixconn.py @@ -3,6 +3,7 @@ import requests.adapters import socket from six.moves import http_client as httplib +from docker.transport.basehttpadapter import BaseHTTPAdapter from .. import constants try: @@ -69,7 +70,7 @@ class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): ) -class UnixAdapter(requests.adapters.HTTPAdapter): +class UnixHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools', 'socket_path', @@ -85,7 +86,7 @@ class UnixAdapter(requests.adapters.HTTPAdapter): self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) - super(UnixAdapter, self).__init__() + super(UnixHTTPAdapter, self).__init__() def get_connection(self, url, proxies=None): with self.pools.lock: @@ -107,6 +108,3 @@ class UnixAdapter(requests.adapters.HTTPAdapter): # anyway, we simply return the path URL directly. # See also: https://github.com/docker/docker-py/issues/811 return request.path_url - - def close(self): - self.pools.clear() diff --git a/docker/types/__init__.py b/docker/types/__init__.py index 0b0d847..5db330e 100644 --- a/docker/types/__init__.py +++ b/docker/types/__init__.py @@ -5,7 +5,8 @@ from .healthcheck import Healthcheck from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig from .services import ( ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec, - Mount, Placement, Privileges, Resources, RestartPolicy, SecretReference, - ServiceMode, TaskTemplate, UpdateConfig + Mount, Placement, PlacementPreference, Privileges, Resources, + RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate, + UpdateConfig, NetworkAttachmentConfig ) from .swarm import SwarmSpec, SwarmExternalCA diff --git a/docker/types/containers.py b/docker/types/containers.py index 2521420..fd8cab4 100644 --- a/docker/types/containers.py +++ b/docker/types/containers.py @@ -23,6 +23,35 @@ class LogConfigTypesEnum(object): class LogConfig(DictType): + """ + Configure logging for a container, when provided as an argument to + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + You may refer to the + `official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_ + for more information. + + Args: + type (str): Indicate which log driver to use. A set of valid drivers + is provided as part of the :py:attr:`LogConfig.types` + enum. Other values may be accepted depending on the engine version + and available logging plugins. + config (dict): A driver-dependent configuration dictionary. 
Please + refer to the driver's documentation for a list of valid config + keys. + + Example: + + >>> from docker.types import LogConfig + >>> lc = LogConfig(type=LogConfig.types.JSON, config={ + ... 'max-size': '1g', + ... 'labels': 'production_status,geo' + ... }) + >>> hc = client.create_host_config(log_config=lc) + >>> container = client.create_container('busybox', 'true', + ... host_config=hc) + >>> client.inspect_container(container)['HostConfig']['LogConfig'] + {'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}} + """ # noqa: E501 types = LogConfigTypesEnum def __init__(self, **kwargs): @@ -50,14 +79,40 @@ class LogConfig(DictType): return self['Config'] def set_config_value(self, key, value): + """ Set a the value for ``key`` to ``value`` inside the ``config`` + dict. + """ self.config[key] = value def unset_config(self, key): + """ Remove the ``key`` property from the ``config`` dict. """ if key in self.config: del self.config[key] class Ulimit(DictType): + """ + Create a ulimit declaration to be used with + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + + Args: + + name (str): Which ulimit will this apply to. A list of valid names can + be found `here <http://tinyurl.me/ZWRkM2Ztwlykf>`_. + soft (int): The soft limit for this ulimit. Optional. + hard (int): The hard limit for this ulimit. Optional. + + Example: + + >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024) + >>> hc = client.create_host_config(ulimits=[nproc_limit]) + >>> container = client.create_container( + 'busybox', 'true', host_config=hc + ) + >>> client.inspect_container(container)['HostConfig']['Ulimits'] + [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}] + + """ def __init__(self, **kwargs): name = kwargs.get('name', kwargs.get('Name')) soft = kwargs.get('soft', kwargs.get('Soft')) @@ -115,11 +170,11 @@ class HostConfig(dict): device_read_iops=None, device_write_iops=None, oom_kill_disable=False, shm_size=None, sysctls=None, tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None, - cpuset_cpus=None, userns_mode=None, pids_limit=None, - isolation=None, auto_remove=False, storage_opt=None, - init=None, init_path=None, volume_driver=None, - cpu_count=None, cpu_percent=None, nano_cpus=None, - cpuset_mems=None, runtime=None, mounts=None, + cpuset_cpus=None, userns_mode=None, uts_mode=None, + pids_limit=None, isolation=None, auto_remove=False, + storage_opt=None, init=None, init_path=None, + volume_driver=None, cpu_count=None, cpu_percent=None, + nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None, cpu_rt_period=None, cpu_rt_runtime=None, device_cgroup_rules=None): @@ -264,10 +319,10 @@ class HostConfig(dict): if not isinstance(ulimits, list): raise host_config_type_error('ulimits', ulimits, 'list') self['Ulimits'] = [] - for l in ulimits: - if not isinstance(l, Ulimit): - l = Ulimit(**l) - self['Ulimits'].append(l) + for lmt in ulimits: + if not isinstance(lmt, Ulimit): + lmt = Ulimit(**lmt) + self['Ulimits'].append(lmt) if log_config is not None: if not isinstance(log_config, LogConfig): @@ -392,6 +447,11 @@ class HostConfig(dict): raise host_config_value_error("userns_mode", userns_mode) self['UsernsMode'] = userns_mode + if uts_mode: + if uts_mode != "host": + raise host_config_value_error("uts_mode", uts_mode) + self['UTSMode'] = uts_mode + if pids_limit: if not isinstance(pids_limit, int): raise host_config_type_error('pids_limit', pids_limit, 'int') @@ -573,7 +633,7 @@ class ContainerConfig(dict): 'Hostname': hostname, 
'Domainname': domainname, 'ExposedPorts': ports, - 'User': six.text_type(user) if user else None, + 'User': six.text_type(user) if user is not None else None, 'Tty': tty, 'OpenStdin': stdin_open, 'StdinOnce': stdin_once, diff --git a/docker/types/daemon.py b/docker/types/daemon.py index ee8624e..af3e5bc 100644 --- a/docker/types/daemon.py +++ b/docker/types/daemon.py @@ -5,6 +5,8 @@ try: except ImportError: import urllib3 +from ..errors import DockerException + class CancellableStream(object): """ @@ -13,7 +15,7 @@ class CancellableStream(object): Example: >>> events = client.events() >>> for event in events: - ... print event + ... print(event) >>> # and cancel from another thread >>> events.close() """ @@ -55,9 +57,17 @@ class CancellableStream(object): elif hasattr(sock_raw, '_sock'): sock = sock_raw._sock + elif hasattr(sock_fp, 'channel'): + # We're working with a paramiko (SSH) channel, which doesn't + # support cancelable streams with the current implementation + raise DockerException( + 'Cancellable streams not supported for the SSH protocol' + ) else: sock = sock_fp._sock - if isinstance(sock, urllib3.contrib.pyopenssl.WrappedSocket): + + if hasattr(urllib3.contrib, 'pyopenssl') and isinstance( + sock, urllib3.contrib.pyopenssl.WrappedSocket): sock = sock.socket sock.shutdown(socket.SHUT_RDWR) diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py index 61857c2..9815018 100644 --- a/docker/types/healthcheck.py +++ b/docker/types/healthcheck.py @@ -14,7 +14,7 @@ class Healthcheck(DictType): - Empty list: Inherit healthcheck from parent image - ``["NONE"]``: Disable healthcheck - ``["CMD", args...]``: exec arguments directly. - - ``["CMD-SHELL", command]``: RUn command in the system's + - ``["CMD-SHELL", command]``: Run command in the system's default shell. If a string is provided, it will be used as a ``CMD-SHELL`` @@ -23,9 +23,9 @@ class Healthcheck(DictType): should be 0 or at least 1000000 (1 ms). timeout (int): The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). - retries (integer): The number of consecutive failures needed to + retries (int): The number of consecutive failures needed to consider a container as unhealthy. - start_period (integer): Start period for the container to + start_period (int): Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). """ @@ -53,6 +53,8 @@ class Healthcheck(DictType): @test.setter def test(self, value): + if isinstance(value, six.string_types): + value = ["CMD-SHELL", value] self['Test'] = value @property diff --git a/docker/types/services.py b/docker/types/services.py index 31f4750..05dda15 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -26,8 +26,8 @@ class TaskTemplate(dict): placement (Placement): Placement instructions for the scheduler. If a list is passed instead, it is assumed to be a list of constraints as part of a :py:class:`Placement` object. - networks (:py:class:`list`): List of network names or IDs to attach - the containers to. + networks (:py:class:`list`): List of network names or IDs or + :py:class:`NetworkAttachmentConfig` to attach the service to. force_update (int): A counter that triggers an update even if no relevant parameters have been changed. """ @@ -110,13 +110,15 @@ class ContainerSpec(dict): privileges (Privileges): Security options for the service's containers. 
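A small sketch of the string-to-``CMD-SHELL`` conversion in :py:class:`~docker.types.Healthcheck` (the command and durations are placeholders; durations are in nanoseconds, as documented above):

    from docker.types import Healthcheck

    hc = Healthcheck(
        test='curl -f http://localhost/ || exit 1',  # wrapped as CMD-SHELL
        interval=30 * 1000000000,
        timeout=10 * 1000000000,
        retries=3,
        start_period=5 * 1000000000,
    )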
isolation (string): Isolation technology used by the service's containers. Only used for Windows containers. + init (boolean): Run an init inside the container that forwards signals + and reaps processes. """ def __init__(self, image, command=None, args=None, hostname=None, env=None, workdir=None, user=None, labels=None, mounts=None, stop_grace_period=None, secrets=None, tty=None, groups=None, open_stdin=None, read_only=None, stop_signal=None, healthcheck=None, hosts=None, dns_config=None, configs=None, - privileges=None, isolation=None): + privileges=None, isolation=None, init=None): self['Image'] = image if isinstance(command, six.string_types): @@ -183,6 +185,9 @@ class ContainerSpec(dict): if isolation is not None: self['Isolation'] = isolation + if init is not None: + self['Init'] = init + class Mount(dict): """ @@ -368,10 +373,11 @@ class UpdateConfig(dict): parallelism (int): Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). Default: 0. - delay (int): Amount of time between updates. + delay (int): Amount of time between updates, in nanoseconds. failure_action (string): Action to take if an updated task fails to run, or stops running during the update. Acceptable values are - ``continue`` and ``pause``. Default: ``continue`` + ``continue``, ``pause``, as well as ``rollback`` since API v1.28. + Default: ``continue`` monitor (int): Amount of time to monitor each updated task for failures, in nanoseconds. max_failure_ratio (float): The fraction of tasks that may fail during @@ -385,9 +391,9 @@ class UpdateConfig(dict): self['Parallelism'] = parallelism if delay is not None: self['Delay'] = delay - if failure_action not in ('pause', 'continue'): + if failure_action not in ('pause', 'continue', 'rollback'): raise errors.InvalidArgument( - 'failure_action must be either `pause` or `continue`.' + 'failure_action must be one of `pause`, `continue`, `rollback`' ) self['FailureAction'] = failure_action @@ -413,6 +419,30 @@ class UpdateConfig(dict): self['Order'] = order +class RollbackConfig(UpdateConfig): + """ + Used to specify the way containe rollbacks should be performed by a service + + Args: + parallelism (int): Maximum number of tasks to be rolled back in one + iteration (0 means unlimited parallelism). Default: 0 + delay (int): Amount of time between rollbacks, in nanoseconds. + failure_action (string): Action to take if a rolled back task fails to + run, or stops running during the rollback. Acceptable values are + ``continue``, ``pause`` or ``rollback``. + Default: ``continue`` + monitor (int): Amount of time to monitor each rolled back task for + failures, in nanoseconds. + max_failure_ratio (float): The fraction of tasks that may fail during + a rollback before the failure action is invoked, specified as a + floating point number between 0 and 1. Default: 0 + order (string): Specifies the order of operations when rolling out a + rolled back task. Either ``start_first`` or ``stop_first`` are + accepted. + """ + pass + + class RestartConditionTypesEnum(object): _values = ( 'none', @@ -623,18 +653,24 @@ class Placement(dict): Placement constraints to be used as part of a :py:class:`TaskTemplate` Args: - constraints (:py:class:`list`): A list of constraints - preferences (:py:class:`list`): Preferences provide a way to make - the scheduler aware of factors such as topology. They are - provided in order from highest to lowest precedence. 
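A sketch of the widened ``failure_action`` values and the new :py:class:`~docker.types.RollbackConfig` (values are placeholders; delays are in nanoseconds):

    from docker.types import RollbackConfig, UpdateConfig

    update = UpdateConfig(
        parallelism=2, delay=5 * 1000000000, failure_action='rollback'
    )
    rollback = RollbackConfig(parallelism=1, failure_action='pause')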
- platforms (:py:class:`list`): A list of platforms expressed as - ``(arch, os)`` tuples + constraints (:py:class:`list` of str): A list of constraints + preferences (:py:class:`list` of tuple): Preferences provide a way + to make the scheduler aware of factors such as topology. They + are provided in order from highest to lowest precedence and + are expressed as ``(strategy, descriptor)`` tuples. See + :py:class:`PlacementPreference` for details. + platforms (:py:class:`list` of tuple): A list of platforms + expressed as ``(arch, os)`` tuples """ def __init__(self, constraints=None, preferences=None, platforms=None): if constraints is not None: self['Constraints'] = constraints if preferences is not None: - self['Preferences'] = preferences + self['Preferences'] = [] + for pref in preferences: + if isinstance(pref, tuple): + pref = PlacementPreference(*pref) + self['Preferences'].append(pref) if platforms: self['Platforms'] = [] for plat in platforms: @@ -643,6 +679,27 @@ class Placement(dict): }) +class PlacementPreference(dict): + """ + Placement preference to be used as an element in the list of + preferences for :py:class:`Placement` objects. + + Args: + strategy (string): The placement strategy to implement. Currently, + the only supported strategy is ``spread``. + descriptor (string): A label descriptor. For the spread strategy, + the scheduler will try to spread tasks evenly over groups of + nodes identified by this label. + """ + def __init__(self, strategy, descriptor): + if strategy != 'spread': + raise errors.InvalidArgument( + 'PlacementPreference strategy value is invalid ({}):' + ' must be "spread".'.format(strategy) + ) + self['Spread'] = {'SpreadDescriptor': descriptor} + + class DNSConfig(dict): """ Specification for DNS related configurations in resolver configuration @@ -662,7 +719,7 @@ class DNSConfig(dict): class Privileges(dict): - """ + r""" Security options for a service's containers. Part of a :py:class:`ContainerSpec` definition. @@ -713,3 +770,21 @@ class Privileges(dict): if len(selinux_context) > 0: self['SELinuxContext'] = selinux_context + + +class NetworkAttachmentConfig(dict): + """ + Network attachment options for a service. + + Args: + target (str): The target network for attachment. + Can be a network name or ID. + aliases (:py:class:`list`): A list of discoverable alternate names + for the service. + options (:py:class:`dict`): Driver attachment options for the + network target. + """ + def __init__(self, target, aliases=None, options=None): + self['Target'] = target + self['Aliases'] = aliases + self['DriverOpts'] = options diff --git a/docker/utils/ports.py b/docker/utils/ports.py index bf7d697..a50cc02 100644 --- a/docker/utils/ports.py +++ b/docker/utils/ports.py @@ -3,11 +3,11 @@ import re PORT_SPEC = re.compile( "^" # Match full string "(" # External part - "((?P<host>[a-fA-F\d.:]+):)?" # Address - "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range + r"((?P<host>[a-fA-F\d.:]+):)?" # Address + r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range ")?" - "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range - "(?P<proto>/(udp|tcp))?" # Protocol + r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range + "(?P<proto>/(udp|tcp|sctp))?" 
# Protocol "$" # Match full string ) diff --git a/docker/utils/proxy.py b/docker/utils/proxy.py new file mode 100644 index 0000000..49e98ed --- /dev/null +++ b/docker/utils/proxy.py @@ -0,0 +1,73 @@ +from .utils import format_environment + + +class ProxyConfig(dict): + ''' + Hold the client's proxy configuration + ''' + @property + def http(self): + return self.get('http') + + @property + def https(self): + return self.get('https') + + @property + def ftp(self): + return self.get('ftp') + + @property + def no_proxy(self): + return self.get('no_proxy') + + @staticmethod + def from_dict(config): + ''' + Instantiate a new ProxyConfig from a dictionary that represents a + client configuration, as described in `the documentation`_. + + .. _the documentation: + https://docs.docker.com/network/proxy/#configure-the-docker-client + ''' + return ProxyConfig( + http=config.get('httpProxy'), + https=config.get('httpsProxy'), + ftp=config.get('ftpProxy'), + no_proxy=config.get('noProxy'), + ) + + def get_environment(self): + ''' + Return a dictionary representing the environment variables used to + set the proxy settings. + ''' + env = {} + if self.http: + env['http_proxy'] = env['HTTP_PROXY'] = self.http + if self.https: + env['https_proxy'] = env['HTTPS_PROXY'] = self.https + if self.ftp: + env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp + if self.no_proxy: + env['no_proxy'] = env['NO_PROXY'] = self.no_proxy + return env + + def inject_proxy_environment(self, environment): + ''' + Given a list of strings representing environment variables, prepend the + environment variables corresponding to the proxy settings. + ''' + if not self: + return environment + + proxy_env = format_environment(self.get_environment()) + if not environment: + return proxy_env + # It is important to prepend our variables, because we want the + # variables defined in "environment" to take precedence. + return proxy_env + environment + + def __str__(self): + return 'ProxyConfig(http={}, https={}, ftp={}, no_proxy={})'.format( + self.http, self.https, self.ftp, self.no_proxy) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 7b96d4f..7ba9505 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -12,6 +12,10 @@ except ImportError: NpipeSocket = type(None) +STDOUT = 1 +STDERR = 2 + + class SocketError(Exception): pass @@ -51,28 +55,43 @@ def read_exactly(socket, n): return data -def next_frame_size(socket): +def next_frame_header(socket): """ - Returns the size of the next frame of data waiting to be read from socket, - according to the protocol defined here: + Returns the stream and size of the next frame of data waiting to be read + from socket, according to the protocol defined here: - https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container + https://docs.docker.com/engine/api/v1.24/#attach-to-a-container """ try: data = read_exactly(socket, 8) except SocketError: - return -1 + return (-1, -1) + + stream, actual = struct.unpack('>BxxxL', data) + return (stream, actual) + - _, actual = struct.unpack('>BxxxL', data) - return actual +def frames_iter(socket, tty): + """ + Return a generator of frames read from socket. A frame is a tuple where + the first item is the stream number and the second item is a chunk of data. + + If the tty setting is enabled, the streams are multiplexed into the stdout + stream. 
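A sketch of the new ``ProxyConfig`` helper (the proxy URL is a placeholder): ``from_dict()`` mirrors the keys used in ``~/.docker/config.json``, and ``inject_proxy_environment()`` prepends the proxy variables so explicitly provided values keep precedence.

    from docker.utils.proxy import ProxyConfig

    cfg = ProxyConfig.from_dict({
        'httpProxy': 'http://proxy.example.com:3128',
        'noProxy': 'localhost,127.0.0.1',
    })
    env = cfg.inject_proxy_environment(['APP_ENV=prod'])
    # env begins with http_proxy/HTTP_PROXY and no_proxy/NO_PROXY entries,
    # followed by 'APP_ENV=prod'.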
+ """ + if tty: + return ((STDOUT, frame) for frame in frames_iter_tty(socket)) + else: + return frames_iter_no_tty(socket) -def frames_iter(socket): +def frames_iter_no_tty(socket): """ - Returns a generator of frames read from socket + Returns a generator of data read from the socket when the tty setting is + not enabled. """ while True: - n = next_frame_size(socket) + (stream, n) = next_frame_header(socket) if n < 0: break while n > 0: @@ -84,13 +103,13 @@ def frames_iter(socket): # We have reached EOF return n -= data_length - yield result + yield (stream, result) -def socket_raw_iter(socket): +def frames_iter_tty(socket): """ - Returns a generator of data read from the socket. - This is used for non-multiplexed streams. + Return a generator of data read from the socket when the tty setting is + enabled. """ while True: result = read(socket) @@ -98,3 +117,53 @@ def socket_raw_iter(socket): # We have reached EOF return yield result + + +def consume_socket_output(frames, demux=False): + """ + Iterate through frames read from the socket and return the result. + + Args: + + demux (bool): + If False, stdout and stderr are multiplexed, and the result is the + concatenation of all the frames. If True, the streams are + demultiplexed, and the result is a 2-tuple where each item is the + concatenation of frames belonging to the same stream. + """ + if demux is False: + # If the streams are multiplexed, the generator returns strings, that + # we just need to concatenate. + return six.binary_type().join(frames) + + # If the streams are demultiplexed, the generator yields tuples + # (stdout, stderr) + out = [None, None] + for frame in frames: + # It is guaranteed that for each frame, one and only one stream + # is not None. + assert frame != (None, None) + if frame[0] is not None: + if out[0] is None: + out[0] = frame[0] + else: + out[0] += frame[0] + else: + if out[1] is None: + out[1] = frame[1] + else: + out[1] += frame[1] + return tuple(out) + + +def demux_adaptor(stream_id, data): + """ + Utility to demultiplex stdout and stderr when reading frames from the + socket. + """ + if stream_id == STDOUT: + return (data, None) + elif stream_id == STDERR: + return (None, data) + else: + raise ValueError('{0} is not a valid stream'.format(stream_id)) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index fe3b9a5..7819ace 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -1,10 +1,11 @@ import base64 +import json import os import os.path -import json import shlex -from distutils.version import StrictVersion +import string from datetime import datetime +from distutils.version import StrictVersion import six @@ -13,11 +14,12 @@ from .. 
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index fe3b9a5..7819ace 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -1,10 +1,11 @@
import base64
+import json
import os
import os.path
-import json
import shlex
-from distutils.version import StrictVersion
+import string
from datetime import datetime
+from distutils.version import StrictVersion

import six

@@ -13,11 +14,12 @@
from .. import tls

if six.PY2:
    from urllib import splitnport
+    from urlparse import urlparse
else:
-    from urllib.parse import splitnport
+    from urllib.parse import splitnport, urlparse

DEFAULT_HTTP_HOST = "127.0.0.1"
-DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'

BYTE_UNITS = {
@@ -212,75 +214,93 @@ def parse_repository_tag(repo_name):
    return repo_name, None


-# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
-# fd:// protocol unsupported (for obvious reasons)
-# Added support for http and https
-# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
-    proto = "http+unix"
-    port = None
    path = ''
+    port = None
+    host = None
+
+    # Sensible defaults
    if not addr and is_win32:
-        addr = DEFAULT_NPIPE
-
+        return DEFAULT_NPIPE
    if not addr or addr.strip() == 'unix://':
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()
-    if addr.startswith('http://'):
-        addr = addr.replace('http://', 'tcp://')
-    if addr.startswith('http+unix://'):
-        addr = addr.replace('http+unix://', 'unix://')

-    if addr == 'tcp://':
+    parsed_url = urlparse(addr)
+    proto = parsed_url.scheme
+    if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
+        # https://bugs.python.org/issue754016
+        parsed_url = urlparse('//' + addr, 'tcp')
+        proto = 'tcp'
+
+    if proto == 'fd':
+        raise errors.DockerException('fd protocol is not implemented')
+
+    # These protos are valid aliases for our library but not for the
+    # official spec
+    if proto == 'http' or proto == 'https':
+        tls = proto == 'https'
+        proto = 'tcp'
+    elif proto == 'http+unix':
+        proto = 'unix'
+
+    if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
        raise errors.DockerException(
-            "Invalid bind address format: {0}".format(addr)
+            "Invalid bind address protocol: {}".format(addr)
        )
-    elif addr.startswith('unix://'):
-        addr = addr[7:]
-    elif addr.startswith('tcp://'):
-        proto = 'http{0}'.format('s' if tls else '')
-        addr = addr[6:]
-    elif addr.startswith('https://'):
-        proto = "https"
-        addr = addr[8:]
-    elif addr.startswith('npipe://'):
-        proto = 'npipe'
-        addr = addr[8:]
-    elif addr.startswith('fd://'):
-        raise errors.DockerException("fd protocol is not implemented")
-    else:
-        if "://" in addr:
-            raise errors.DockerException(
-                "Invalid bind address protocol: {0}".format(addr)
-            )
-        proto = "https" if tls else "http"

-    if proto in ("http", "https"):
-        address_parts = addr.split('/', 1)
-        host = address_parts[0]
-        if len(address_parts) == 2:
-            path = '/' + address_parts[1]
-        host, port = splitnport(host)
+    if proto == 'tcp' and not parsed_url.netloc:
+        # "tcp://" is exceptionally disallowed by convention;
+        # omitting a hostname for other protocols is fine
+        raise errors.DockerException(
+            'Invalid bind address format: {}'.format(addr)
+        )

-        if port is None:
-            raise errors.DockerException(
-                "Invalid port: {0}".format(addr)
-            )
+    if any([
+        parsed_url.params, parsed_url.query, parsed_url.fragment,
+        parsed_url.password
+    ]):
+        raise errors.DockerException(
+            'Invalid bind address format: {}'.format(addr)
+        )
+
+    if parsed_url.path and proto == 'ssh':
+        raise errors.DockerException(
+            'Invalid bind address format: no path allowed for this protocol:'
+            ' {}'.format(addr)
+        )
+    else:
+        path = parsed_url.path
+        if proto == 'unix' and parsed_url.hostname is not None:
+            # For legacy reasons, we consider unix://path
+            # to be valid and equivalent to unix:///path
+            path = '/'.join((parsed_url.hostname, path))
+
+    if proto in ('tcp', 'ssh'):
+        # parsed_url.hostname strips brackets from IPv6 addresses,
+        # which can be problematic hence our use of splitnport() instead.
+        host, port = splitnport(parsed_url.netloc)
+        if port is None or port < 0:
+            if proto != 'ssh':
+                raise errors.DockerException(
+                    'Invalid bind address format: port is required:'
+                    ' {}'.format(addr)
+                )
+            port = 22
+
        if not host:
            host = DEFAULT_HTTP_HOST
-    else:
-        host = addr

-    if proto in ("http", "https") and port == -1:
-        raise errors.DockerException(
-            "Bind address needs a port: {0}".format(addr))
+    # Rewrite schemes to fit library internals (requests adapters)
+    if proto == 'tcp':
+        proto = 'http{}'.format('s' if tls else '')
+    elif proto == 'unix':
+        proto = 'http+unix'

-    if proto == "http+unix" or proto == 'npipe':
-        return "{0}://{1}".format(proto, host).rstrip('/')
-    return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
+    if proto in ('http+unix', 'npipe'):
+        return "{}://{}".format(proto, path).rstrip('/')
+    return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')


def parse_devices(devices):
@@ -332,9 +352,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
    params = {}

    if host:
-        params['base_url'] = (
-            host.replace('tcp://', 'https://') if enable_tls else host
-        )
+        params['base_url'] = host

    if not enable_tls:
        return params
@@ -366,7 +384,10 @@ def convert_filters(filters):
            v = 'true' if v else 'false'
        if not isinstance(v, list):
            v = [v, ]
-        result[k] = v
+        result[k] = [
+            str(item) if not isinstance(item, six.string_types) else item
+            for item in v
+        ]
    return json.dumps(result)
@@ -421,7 +442,7 @@ def normalize_links(links):
    if isinstance(links, dict):
        links = six.iteritems(links)

-    return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
+    return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]


def parse_env_file(env_file):
diff --git a/docker/version.py b/docker/version.py
index d451374..99a8b42 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "3.4.1"
+version = "4.1.0"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
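As a rough sanity check of the reworked parse_host() shown above (the addresses are arbitrary examples, and parse_host is assumed to be re-exported from docker.utils; otherwise import it from docker.utils.utils), the scheme handling yields results along these lines:

from docker.utils import parse_host

print(parse_host('tcp://127.0.0.1:2375'))            # http://127.0.0.1:2375
print(parse_host('tcp://127.0.0.1:2376', tls=True))  # https://127.0.0.1:2376
print(parse_host('unix:///var/run/docker.sock'))     # http+unix:///var/run/docker.sock
print(parse_host('ssh://10.0.0.5'))                  # ssh://10.0.0.5:22 (port defaults to 22)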