summaryrefslogtreecommitdiff
path: root/compose
diff options
context:
space:
mode:
authorFelipe Sateler <fsateler@debian.org>2017-11-19 18:27:48 -0300
committerFelipe Sateler <fsateler@debian.org>2017-11-19 18:27:48 -0300
commita997ae5b1840f2878b16443bd8e3c784d23ba9ac (patch)
tree35a232d0ad24ed27ccc2ad4dfa45f7c7496f55b3 /compose
Import docker-compose_1.17.1.orig.tar.gz
[dgit import orig docker-compose_1.17.1.orig.tar.gz]
Diffstat (limited to 'compose')
-rw-r--r--compose/__init__.py4
-rw-r--r--compose/__main__.py6
-rw-r--r--compose/bundle.py258
-rw-r--r--compose/cli/__init__.py49
-rw-r--r--compose/cli/colors.py49
-rw-r--r--compose/cli/command.py120
-rw-r--r--compose/cli/docker_client.py95
-rw-r--r--compose/cli/docopt_command.py59
-rw-r--r--compose/cli/errors.py162
-rw-r--r--compose/cli/formatter.py51
-rw-r--r--compose/cli/log_printer.py250
-rw-r--r--compose/cli/main.py1297
-rw-r--r--compose/cli/signals.py30
-rw-r--r--compose/cli/utils.py150
-rw-r--r--compose/cli/verbose_proxy.py60
-rw-r--r--compose/config/__init__.py12
-rw-r--r--compose/config/config.py1306
-rw-r--r--compose/config/config_schema_v1.json188
-rw-r--r--compose/config/config_schema_v2.0.json389
-rw-r--r--compose/config/config_schema_v2.1.json441
-rw-r--r--compose/config/config_schema_v2.2.json448
-rw-r--r--compose/config/config_schema_v2.3.json451
-rw-r--r--compose/config/config_schema_v3.0.json384
-rw-r--r--compose/config/config_schema_v3.1.json429
-rw-r--r--compose/config/config_schema_v3.2.json476
-rw-r--r--compose/config/config_schema_v3.3.json535
-rw-r--r--compose/config/config_schema_v3.4.json544
-rw-r--r--compose/config/config_schema_v3.5.json542
-rw-r--r--compose/config/environment.py120
-rw-r--r--compose/config/errors.py55
-rw-r--r--compose/config/interpolation.py102
-rw-r--r--compose/config/serialize.py145
-rw-r--r--compose/config/sort_services.py73
-rw-r--r--compose/config/types.py351
-rw-r--r--compose/config/validation.py467
-rw-r--r--compose/const.py63
-rw-r--r--compose/container.py276
-rw-r--r--compose/errors.py33
-rw-r--r--compose/network.py286
-rw-r--r--compose/parallel.py298
-rw-r--r--compose/progress_stream.py111
-rw-r--r--compose/project.py674
-rw-r--r--compose/service.py1428
-rw-r--r--compose/state.py0
-rw-r--r--compose/timeparse.py96
-rw-r--r--compose/utils.py145
-rw-r--r--compose/version.py10
-rw-r--r--compose/volume.py149
48 files changed, 13667 insertions, 0 deletions
diff --git a/compose/__init__.py b/compose/__init__.py
new file mode 100644
index 00000000..20392ec9
--- /dev/null
+++ b/compose/__init__.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+__version__ = '1.17.1'
diff --git a/compose/__main__.py b/compose/__main__.py
new file mode 100644
index 00000000..27a7acbb
--- /dev/null
+++ b/compose/__main__.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose.cli.main import main
+
+main()
diff --git a/compose/bundle.py b/compose/bundle.py
new file mode 100644
index 00000000..937a3708
--- /dev/null
+++ b/compose/bundle.py
@@ -0,0 +1,258 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import logging
+
+import six
+from docker.utils import split_command
+from docker.utils.ports import split_port
+
+from .cli.errors import UserError
+from .config.serialize import denormalize_config
+from .network import get_network_defs_for_service
+from .service import format_environment
+from .service import NoSuchImageError
+from .service import parse_repository_tag
+
+
+log = logging.getLogger(__name__)
+
+
# Mapping of compose service keys to their bundle config key names.
SERVICE_KEYS = {
    'working_dir': 'WorkingDir',
    'user': 'User',
    'labels': 'Labels',
}

# Keys that are deliberately dropped without a warning.
IGNORED_KEYS = {'build'}

# Keys that can be converted to bundle form; anything else triggers a
# per-key warning in convert_service_to_bundle().
SUPPORTED_KEYS = {
    'image',
    'ports',
    'expose',
    'networks',
    'command',
    'environment',
    'entrypoint',
} | set(SERVICE_KEYS)

# Bundle file format version.
VERSION = '0.1'
+
+
class NeedsPush(Exception):
    """Raised when an image must be pushed to a registry to obtain a digest."""
    def __init__(self, image_name):
        self.image_name = image_name
+
+
class NeedsPull(Exception):
    """Raised when an image must be pulled from a registry to obtain a digest."""
    def __init__(self, image_name, service_name):
        self.image_name = image_name
        self.service_name = service_name
+
+
class MissingDigests(Exception):
    """Aggregates every image that still needs a push or pull before a
    bundle can be produced."""
    def __init__(self, needs_push, needs_pull):
        self.needs_push = needs_push
        self.needs_pull = needs_pull
+
+
def serialize_bundle(config, image_digests):
    """Render the bundle for ``config`` as pretty-printed, key-sorted JSON."""
    bundle = to_bundle(config, image_digests)
    return json.dumps(bundle, indent=2, sort_keys=True)
+
+
def get_image_digests(project, allow_push=False):
    """Resolve an image digest for every service in ``project``.

    Returns a dict of service name -> digest. Raises MissingDigests when
    any image still needs a registry push or pull first.
    """
    digests = {}
    to_push = set()
    to_pull = set()

    for service in project.services:
        try:
            digests[service.name] = get_image_digest(service, allow_push=allow_push)
        except NeedsPush as push_err:
            to_push.add(push_err.image_name)
        except NeedsPull as pull_err:
            to_pull.add(pull_err.service_name)

    if to_push or to_pull:
        raise MissingDigests(to_push, to_pull)

    return digests
+
+
def get_image_digest(service, allow_push=False):
    """Return a pinned ``repo@digest`` identifier for ``service``'s image.

    Raises UserError when no image is configured or it is missing locally;
    NeedsPull/NeedsPush when a registry round-trip is required first
    (pushing only happens when ``allow_push`` is true).
    """
    if 'image' not in service.options:
        raise UserError(
            "Service '{s.name}' doesn't define an image tag. An image name is "
            "required to generate a proper image digest for the bundle. Specify "
            "an image repo and tag with the 'image' option.".format(s=service))

    _, _, sep = parse_repository_tag(service.options['image'])
    if sep == '@':
        # The compose file already pins a digest; nothing to look up.
        return service.options['image']

    try:
        image = service.image()
    except NoSuchImageError:
        hint = 'build' if 'build' in service.options else 'pull'
        raise UserError(
            "Image not found for service '{service}'. "
            "You might need to run `docker-compose {action} {service}`."
            .format(service=service.name, action=hint))

    repo_digests = image['RepoDigests']
    if repo_digests:
        # TODO: pick a digest based on the image tag if there are multiple
        # digests
        return repo_digests[0]

    if 'build' not in service.options:
        raise NeedsPull(service.image_name, service.name)
    if not allow_push:
        raise NeedsPush(service.image_name)

    return push_image(service)
+
+
def push_image(service):
    """Push ``service``'s image and return its ``repo@digest`` identifier."""
    try:
        digest = service.push()
    except Exception:
        log.error(
            "Failed to push image for service '{s.name}'. Please use an "
            "image tag that can be pushed to a Docker "
            "registry.".format(s=service))
        raise

    if not digest:
        raise ValueError("Failed to get digest for %s" % service.name)

    repo, _, _ = parse_repository_tag(service.options['image'])
    identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)

    # Pull by digest when RepoDigests is empty so the digest is cached
    # locally and the next run doesn't need to push/pull again.
    if not service.image()['RepoDigests']:
        service.client.pull(identifier)
        log.info("Stored digest for {}".format(service.image_name))

    return identifier
+
+
def to_bundle(config, image_digests):
    """Convert a loaded Compose config into a bundle dict.

    config: Compose config object exposing ``networks``, ``volumes`` and a
        ``services`` mapping (after denormalization).
    image_digests: mapping of service name -> pinned image digest.

    Top-level 'networks' and 'volumes' have no bundle representation and
    are dropped with a warning.
    """
    if config.networks:
        # Logger.warn is a deprecated alias of Logger.warning.
        log.warning("Unsupported top level key 'networks' - ignoring")

    if config.volumes:
        log.warning("Unsupported top level key 'volumes' - ignoring")

    config = denormalize_config(config)

    return {
        'Version': VERSION,
        'Services': {
            name: convert_service_to_bundle(
                name,
                service_dict,
                image_digests[name],
            )
            for name, service_dict in config['services'].items()
        },
    }
+
+
def convert_service_to_bundle(name, service_dict, image_digest):
    """Convert one service's compose dict into a bundle container config.

    name: service name (used in warning messages).
    service_dict: the denormalized compose service definition.
    image_digest: pinned image reference to embed.

    Keys in IGNORED_KEYS are silently dropped; unknown keys produce a
    warning and are skipped.
    """
    container_config = {'Image': image_digest}

    for key, value in service_dict.items():
        if key in IGNORED_KEYS:
            continue

        if key not in SUPPORTED_KEYS:
            # Logger.warn is a deprecated alias of Logger.warning.
            log.warning("Unsupported key '{}' in services.{} - ignoring".format(key, name))
            continue

        if key == 'environment':
            # Omit variables with falsy values.
            container_config['Env'] = format_environment({
                envkey: envvalue for envkey, envvalue in value.items()
                if envvalue
            })
            continue

        if key in SERVICE_KEYS:
            container_config[SERVICE_KEYS[key]] = value
            continue

    set_command_and_args(
        container_config,
        service_dict.get('entrypoint', []),
        service_dict.get('command', []))
    container_config['Networks'] = make_service_networks(name, service_dict)

    ports = make_port_specs(service_dict)
    if ports:
        container_config['Ports'] = ports

    return container_config
+
+
# See https://github.com/docker/swarmkit/blob/agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
    """Fold compose 'entrypoint'/'command' into bundle 'Command'/'Args'."""
    if isinstance(entrypoint, six.string_types):
        entrypoint = split_command(entrypoint)
    if isinstance(command, six.string_types):
        command = split_command(command)

    if entrypoint:
        # An entrypoint absorbs the command into a single Command list.
        config['Command'] = entrypoint + command
    elif command:
        config['Args'] = command
+
+
def make_service_networks(name, service_dict):
    """Return the list of network names the service attaches to.

    Per-network options have no bundle representation; each one is warned
    about and dropped.
    """
    networks = []

    for network_name, network_def in get_network_defs_for_service(service_dict).items():
        for key in network_def.keys():
            # Logger.warn is a deprecated alias of Logger.warning.
            log.warning(
                "Unsupported key '{}' in services.{}.networks.{} - ignoring"
                .format(key, name, network_name))

        networks.append(network_name)

    return networks
+
+
def make_port_specs(service_dict):
    """Collect unique port specs from the 'ports' and 'expose' entries."""
    internal_ports = [
        internal
        for entry in service_dict.get('ports', [])
        for internal in split_port(entry)[0]
    ]
    internal_ports.extend(service_dict.get('expose', []))

    specs = []
    for port in internal_ports:
        spec = make_port_spec(port)
        if spec not in specs:
            specs.append(spec)
    return specs
+
+
def make_port_spec(value):
    """Parse 'PORT[/PROTOCOL]' into a bundle port spec (default tcp)."""
    port, _, protocol = six.text_type(value).partition('/')
    return {
        'Protocol': protocol or 'tcp',
        'Port': int(port),
    }
diff --git a/compose/cli/__init__.py b/compose/cli/__init__.py
new file mode 100644
index 00000000..2574a311
--- /dev/null
+++ b/compose/cli/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import subprocess
+import sys
+
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
    # We don't try importing pip because it messes with package imports
    # on some Linux distros (Ubuntu, Fedora)
    # https://github.com/docker/compose/issues/4425
    # https://github.com/docker/compose/issues/4481
    # https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
    env = os.environ.copy()
    env[str('PIP_DISABLE_PIP_VERSION_CHECK')] = str('1')

    s_cmd = subprocess.Popen(
        # DO NOT replace this call with a `sys.executable` call. It breaks the binary
        # distribution (with the binary calling itself recursively over and over).
        ['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
        env=env
    )
    # `pip freeze` output is bytes: one b'name==version' entry per line.
    packages = s_cmd.communicate()[0].splitlines()
    dockerpy_installed = len(
        list(filter(lambda p: p.startswith(b'docker-py=='), packages))
    ) > 0
    if dockerpy_installed:
        from .colors import yellow
        print(
            yellow('WARNING:'),
            "Dependency conflict: an older version of the 'docker-py' package "
            "may be polluting the namespace. "
            "If you're experiencing crashes, run the following command to remedy the issue:\n"
            "pip uninstall docker-py; pip uninstall docker; pip install docker",
            file=sys.stderr
        )

except OSError:
    # pip command is not available, which indicates it's probably the binary
    # distribution of Compose which is not affected
    pass
except UnicodeDecodeError:
    # ref: https://github.com/docker/compose/issues/4663
    # This could be caused by a number of things, but it seems to be a
    # python 2 + MacOS interaction. It's not ideal to ignore this, but at least
    # it doesn't make the program unusable.
    pass
diff --git a/compose/cli/colors.py b/compose/cli/colors.py
new file mode 100644
index 00000000..cb30e361
--- /dev/null
+++ b/compose/cli/colors.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from ..const import IS_WINDOWS_PLATFORM
+
# Base color names, listed in ANSI SGR code order (codes 30-37; see
# get_pairs() below).
NAMES = [
    'grey',
    'red',
    'green',
    'yellow',
    'blue',
    'magenta',
    'cyan',
    'white'
]
+
+
def get_pairs():
    """Yield (color name, ANSI SGR code) pairs, normal then bold variants."""
    for idx, color_name in enumerate(NAMES):
        base_code = str(30 + idx)
        yield color_name, base_code
        yield 'intense_' + color_name, base_code + ';1'
+
+
def ansi(code):
    """Wrap an SGR code in the ANSI escape sequence."""
    return '\033[%sm' % code
+
+
def ansi_color(code, s):
    """Return ``s`` wrapped in the given ANSI color code plus a reset."""
    start, reset = ansi(code), ansi(0)
    return '{0}{1}{2}'.format(start, s, reset)
+
+
def make_color_fn(code):
    """Build a function that colorizes its argument with ``code``."""
    def colorize(s):
        return ansi_color(code, s)
    return colorize
+
+
# On Windows the console doesn't interpret ANSI escapes natively; colorama
# translates them (strip=False keeps the escapes when output is piped).
if IS_WINDOWS_PLATFORM:
    import colorama
    colorama.init(strip=False)
# Install a module-level helper (red(), yellow(), intense_cyan(), ...) for
# every (name, code) pair.
for (name, code) in get_pairs():
    globals()[name] = make_color_fn(code)
+
+
def rainbow():
    """Yield the module's color functions in a fixed, distinct order."""
    order = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
             'intense_cyan', 'intense_yellow', 'intense_green',
             'intense_magenta', 'intense_red', 'intense_blue']
    for color_name in order:
        yield globals()[color_name]
diff --git a/compose/cli/command.py b/compose/cli/command.py
new file mode 100644
index 00000000..e1ae690c
--- /dev/null
+++ b/compose/cli/command.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+import re
+
+import six
+
+from . import errors
+from . import verbose_proxy
+from .. import config
+from ..config.environment import Environment
+from ..const import API_VERSIONS
+from ..project import Project
+from .docker_client import docker_client
+from .docker_client import get_tls_version
+from .docker_client import tls_config_from_options
+from .utils import get_version_info
+
+log = logging.getLogger(__name__)
+
+
def project_from_options(project_dir, options):
    """Build a Project from parsed command-line ``options``.

    Reads the .env file from ``project_dir`` and honours --host,
    --project-name, --verbose, TLS flags and --project-directory.
    """
    environment = Environment.from_env_file(project_dir)

    host = options.get('--host')
    if host is not None:
        # The value may carry a leading '=' (e.g. from `--host=tcp://...`).
        host = host.lstrip('=')

    config_path = get_config_path_from_options(project_dir, options, environment)
    return get_project(
        project_dir,
        config_path,
        project_name=options.get('--project-name'),
        verbose=options.get('--verbose'),
        host=host,
        tls_config=tls_config_from_options(options),
        environment=environment,
        override_dir=options.get('--project-directory'),
    )
+
+
def get_config_from_options(base_dir, options):
    """Load and return the Compose config referenced by ``options``."""
    environment = Environment.from_env_file(base_dir)
    config_path = get_config_path_from_options(base_dir, options, environment)
    config_details = config.find(base_dir, config_path, environment)
    return config.load(config_details)
+
+
def get_config_path_from_options(base_dir, options, environment):
    """Determine the compose file path list from --file or COMPOSE_FILE.

    Returns a list of text paths, or None when neither source names a file.
    """
    def to_text(paths):
        return [
            path.decode('utf-8') if isinstance(path, six.binary_type) else path
            for path in paths
        ]

    file_paths = options.get('--file')
    if file_paths:
        return to_text(file_paths)

    env_files = environment.get('COMPOSE_FILE')
    if env_files:
        # COMPOSE_PATH_SEPARATOR overrides the platform path separator.
        separator = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
        return to_text(env_files.split(separator))
    return None
+
+
def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
               tls_version=None):
    """Create a docker API client; wrap it in a VerboseProxy when verbose."""
    client = docker_client(
        version=version, tls_config=tls_config, host=host,
        environment=environment, tls_version=get_tls_version(environment)
    )
    if not verbose:
        return client

    version_info = six.iteritems(client.version())
    log.info(get_version_info('full'))
    log.info("Docker base_url: %s", client.base_url)
    log.info("Docker version: %s",
             ", ".join("%s=%s" % item for item in version_info))
    return verbose_proxy.VerboseProxy('docker', client)
+
+
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
                host=None, tls_config=None, environment=None, override_dir=None):
    """Locate, load and validate a Compose project; return a Project."""
    if not environment:
        environment = Environment.from_env_file(project_dir)

    details = config.find(project_dir, config_path, environment, override_dir)
    name = get_project_name(details.working_dir, project_name, environment)
    loaded = config.load(details)

    # COMPOSE_API_VERSION overrides the API version implied by the file format.
    api_version = environment.get('COMPOSE_API_VERSION',
                                  API_VERSIONS[loaded.version])
    client = get_client(verbose=verbose, version=api_version,
                        tls_config=tls_config, host=host, environment=environment)

    with errors.handle_connection_errors(client):
        return Project.from_config(name, loaded, client)
+
+
def get_project_name(working_dir, project_name=None, environment=None):
    """Pick the project name.

    Priority: explicit argument, COMPOSE_PROJECT_NAME, the working
    directory's basename, then 'default'. Names are normalized to
    lowercase alphanumerics.
    """
    if not environment:
        environment = Environment.from_env_file(working_dir)

    candidate = project_name or environment.get('COMPOSE_PROJECT_NAME')
    if candidate:
        return re.sub(r'[^a-z0-9]', '', candidate.lower())

    basename = os.path.basename(os.path.abspath(working_dir))
    if basename:
        return re.sub(r'[^a-z0-9]', '', basename.lower())

    return 'default'
diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py
new file mode 100644
index 00000000..44c7ad91
--- /dev/null
+++ b/compose/cli/docker_client.py
@@ -0,0 +1,95 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import ssl
+
+from docker import APIClient
+from docker.errors import TLSParameterError
+from docker.tls import TLSConfig
+from docker.utils import kwargs_from_env
+
+from ..const import HTTP_TIMEOUT
+from .errors import UserError
+from .utils import generate_user_agent
+from .utils import unquote_path
+
+log = logging.getLogger(__name__)
+
+
def get_tls_version(environment):
    """Map COMPOSE_TLS_VERSION (e.g. 'TLSv1_2') to an ``ssl.PROTOCOL_*``
    constant.

    Returns None when the variable is unset or names a protocol this
    Python/OpenSSL build doesn't provide (with a warning).
    """
    compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
    if not compose_tls_version:
        return None

    tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
    if not hasattr(ssl, tls_attr_name):
        # Logger.warn is a deprecated alias of Logger.warning.
        log.warning(
            'The "{}" protocol is unavailable. You may need to update your '
            'version of Python or OpenSSL. Falling back to TLSv1 (default).'
            .format(compose_tls_version)
        )
        return None

    return getattr(ssl, tls_attr_name)
+
+
def tls_config_from_options(options, environment=None):
    """Translate the --tls* command-line flags into a TLS setting.

    Returns a TLSConfig when any advanced option is present, True for a
    bare --tls, or None when TLS is not requested.
    """
    tls = options.get('--tls', False)
    ca_cert = unquote_path(options.get('--tlscacert'))
    cert = unquote_path(options.get('--tlscert'))
    key = unquote_path(options.get('--tlskey'))
    verify = options.get('--tlsverify')
    skip_hostname_check = options.get('--skip-hostname-check', False)
    tls_version = get_tls_version(environment or {})

    has_advanced = any([ca_cert, cert, key, verify, tls_version])

    if not has_advanced:
        # Plain --tls with no certificate options just enables TLS.
        return True if tls is True else None

    # Any advanced option implies TLS; --tls itself becomes a no-op.
    client_cert = (cert, key) if (cert or key) else None
    return TLSConfig(
        client_cert=client_cert, verify=verify, ca_cert=ca_cert,
        assert_hostname=False if skip_hostname_check else None,
        ssl_version=tls_version
    )
+
+
def docker_client(environment, version=None, tls_config=None, host=None,
                  tls_version=None):
    """
    Returns a docker-py client configured using environment variables
    according to the same logic as the official Docker client.
    """
    try:
        kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
    except TLSParameterError:
        raise UserError(
            "TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
            "and DOCKER_CERT_PATH are set correctly.\n"
            "You might need to run `eval \"$(docker-machine env default)\"`")

    # Explicit arguments override whatever came from the environment.
    if host:
        kwargs['base_url'] = host
    if tls_config:
        kwargs['tls'] = tls_config
    if version:
        kwargs['version'] = version

    timeout = environment.get('COMPOSE_HTTP_TIMEOUT')
    kwargs['timeout'] = int(timeout) if timeout else HTTP_TIMEOUT
    kwargs['user_agent'] = generate_user_agent()

    return APIClient(**kwargs)
diff --git a/compose/cli/docopt_command.py b/compose/cli/docopt_command.py
new file mode 100644
index 00000000..809a4b74
--- /dev/null
+++ b/compose/cli/docopt_command.py
@@ -0,0 +1,59 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from inspect import getdoc
+
+from docopt import docopt
+from docopt import DocoptExit
+
+
def docopt_full_help(docstring, *args, **kwargs):
    """Run docopt, but on a usage error exit showing the full docstring
    instead of docopt's abbreviated usage message."""
    try:
        return docopt(docstring, *args, **kwargs)
    except DocoptExit:
        raise SystemExit(docstring)
+
+
class DocoptDispatcher(object):
    """Parse a top-level command line and resolve the handler method."""

    def __init__(self, command_class, options):
        self.command_class = command_class
        self.options = options

    def parse(self, argv):
        """Return (top-level options, handler, handler options) for argv."""
        command_help = getdoc(self.command_class)
        options = docopt_full_help(command_help, argv, **self.options)

        command = options['COMMAND']
        if command is None:
            # No subcommand given: show top-level usage and exit.
            raise SystemExit(command_help)

        handler = get_handler(self.command_class, command)
        docstring = getdoc(handler)
        if docstring is None:
            # Only methods with a docstring are dispatchable commands.
            raise NoSuchCommand(command, self)

        command_options = docopt_full_help(
            docstring, options['ARGS'], options_first=True)
        return options, handler, command_options
+
+
def get_handler(command_class, command):
    """Resolve a CLI command name to the matching method on command_class."""
    method_name = command.replace('-', '_')
    # docker has an `exec` command, but `exec` is a Python keyword, so the
    # handler method is named exec_command instead.
    if method_name == "exec":
        method_name = "exec_command"

    if not hasattr(command_class, method_name):
        raise NoSuchCommand(method_name, command_class)

    return getattr(command_class, method_name)
+
+
class NoSuchCommand(Exception):
    """Raised when the CLI is invoked with an unknown (sub)command."""

    def __init__(self, command, supercommand):
        message = "No such command: %s" % command
        super(NoSuchCommand, self).__init__(message)
        self.command = command
        self.supercommand = supercommand
diff --git a/compose/cli/errors.py b/compose/cli/errors.py
new file mode 100644
index 00000000..1506aa66
--- /dev/null
+++ b/compose/cli/errors.py
@@ -0,0 +1,162 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import contextlib
+import logging
+import socket
+from distutils.spawn import find_executable
+from textwrap import dedent
+
+import six
+from docker.errors import APIError
+from requests.exceptions import ConnectionError as RequestsConnectionError
+from requests.exceptions import ReadTimeout
+from requests.exceptions import SSLError
+from requests.packages.urllib3.exceptions import ReadTimeoutError
+
+from ..const import API_VERSION_TO_ENGINE_VERSION
+from .utils import is_docker_for_mac_installed
+from .utils import is_mac
+from .utils import is_ubuntu
+from .utils import is_windows
+
+
+log = logging.getLogger(__name__)
+
+
class UserError(Exception):
    """An error caused by bad user input, rendered without a traceback."""

    def __init__(self, msg):
        # Normalize indented, triple-quoted messages.
        self.msg = dedent(msg).strip()

    def __str__(self):
        return self.msg

    # Python 2 compatibility: unicode() yields the same text.
    __unicode__ = __str__
+
+
class ConnectionError(Exception):
    """Signals that talking to the Docker daemon failed (details already
    logged).

    NOTE(review): shadows the Python 3 builtin ``ConnectionError`` within
    this module.
    """
    pass
+
+
@contextlib.contextmanager
def handle_connection_errors(client):
    """Translate low-level client failures into logged messages plus the
    module-level ConnectionError, so callers can exit cleanly."""
    try:
        yield
    except SSLError as e:
        log.error('SSL error: %s' % e)
        raise ConnectionError()
    except RequestsConnectionError as e:
        # A read timeout can surface wrapped inside a ConnectionError;
        # report it as a timeout rather than a connection refusal.
        if e.args and isinstance(e.args[0], ReadTimeoutError):
            log_timeout_error(client.timeout)
            raise ConnectionError()
        exit_with_error(get_conn_error_message(client.base_url))
    except APIError as e:
        log_api_error(e, client.api_version)
        raise ConnectionError()
    except (ReadTimeout, socket.timeout) as e:
        log_timeout_error(client.timeout)
        raise ConnectionError()
    except Exception as e:
        # On Windows, named-pipe failures arrive as pywintypes.error; import
        # lazily since pywintypes only exists there.
        if is_windows():
            import pywintypes
            if isinstance(e, pywintypes.error):
                log_windows_pipe_error(e)
                raise ConnectionError()
        raise
+
+
def log_windows_pipe_error(exc):
    """Log a friendly message for a Windows named-pipe failure."""
    # winerror 232: see https://github.com/docker/compose/issues/5005
    if exc.winerror != 232:
        log.error(
            "Windows named pipe error: {} (code: {})".format(exc.strerror, exc.winerror)
        )
        return
    log.error(
        "The current Compose file version is not compatible with your engine version. "
        "Please upgrade your Compose file to a more recent version, or set "
        "a COMPOSE_API_VERSION in your environment."
    )
+
+
def log_timeout_error(timeout):
    """Explain an HTTP timeout and suggest raising COMPOSE_HTTP_TIMEOUT."""
    message = (
        "An HTTP request took too long to complete. Retry with --verbose to "
        "obtain debug information.\n"
        "If you encounter this issue regularly because of slow network "
        "conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
        "value (current value: %s)." % timeout
    )
    log.error(message)
+
+
def log_api_error(e, client_version):
    """Log an APIError, special-casing client/server version mismatches."""
    explanation = e.explanation
    if isinstance(explanation, six.binary_type):
        explanation = explanation.decode('utf-8')

    # NOTE(review): if explanation can ever be None here, the `in` test
    # below would raise TypeError — confirm against docker-py's APIError.
    if 'client is newer than server' not in explanation:
        log.error(explanation)
        return

    version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
    if not version:
        # They've set a custom API version
        log.error(explanation)
        return

    log.error(
        "The Docker Engine version is less than the minimum required by "
        "Compose. Your current project requires a Docker Engine of "
        "version {version} or greater.".format(version=version))
+
+
def exit_with_error(msg):
    """Log ``msg`` (dedented and stripped) and abort via ConnectionError."""
    cleaned = dedent(msg).strip()
    log.error(cleaned)
    raise ConnectionError()
+
+
def get_conn_error_message(url):
    """Choose the most helpful connection-failure message based on which
    Docker tooling is present on this machine."""
    if find_executable('docker') is None:
        # No docker CLI at all: suggest installing Docker.
        return docker_not_found_msg("Couldn't connect to Docker daemon.")
    if is_docker_for_mac_installed():
        return conn_error_docker_for_mac
    if find_executable('docker-machine') is not None:
        return conn_error_docker_machine
    return conn_error_generic.format(url=url)
+
+
def docker_not_found_msg(problem):
    """Append platform-appropriate install instructions to ``problem``."""
    install_url = docker_install_url()
    return "{} You might need to install Docker:\n\n{}".format(problem, install_url)
+
+
def docker_install_url():
    """Return the Docker installation docs URL for the current platform."""
    if is_mac():
        return docker_install_url_mac
    if is_ubuntu():
        return docker_install_url_ubuntu
    if is_windows():
        return docker_install_url_windows
    return docker_install_url_generic
+
+
# Platform-specific Docker installation documentation URLs.
docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/"
docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/"
docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/"
docker_install_url_generic = "https://docs.docker.com/engine/installation/"


# Canned connection-failure messages; dedented/stripped before display.
conn_error_docker_machine = """
    Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
"""

conn_error_docker_for_mac = """
    Couldn't connect to Docker daemon. You might need to start Docker for Mac.
"""


conn_error_generic = """
    Couldn't connect to Docker daemon at {url} - is it running?

    If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
"""
diff --git a/compose/cli/formatter.py b/compose/cli/formatter.py
new file mode 100644
index 00000000..6c0a3695
--- /dev/null
+++ b/compose/cli/formatter.py
@@ -0,0 +1,51 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+
+import six
+import texttable
+
+from compose.cli import colors
+
+
def get_tty_width():
    """Return the terminal width via `stty size`, or 0 when unavailable."""
    output = os.popen('stty size 2> /dev/null', 'r').read()
    parts = output.split()
    if len(parts) == 2:
        # `stty size` prints "<rows> <cols>".
        return int(parts[1])
    return 0
+
+
class Formatter(object):
    """Format tabular data for printing."""

    def table(self, headers, rows):
        """Render ``headers`` plus ``rows`` as an ASCII table string."""
        tbl = texttable.Texttable(max_width=get_tty_width())
        tbl.set_cols_dtype(['t'] * len(headers))
        tbl.add_rows([headers] + rows)
        tbl.set_deco(tbl.HEADER)
        tbl.set_chars(['-', '|', '+', '-'])
        return tbl.draw()
+
+
class ConsoleWarningFormatter(logging.Formatter):
    """A logging.Formatter which prints WARNING and ERROR messages with
    a prefix of the log level colored appropriate for the log level.
    """

    def get_level_message(self, record):
        """Return a colored 'LEVEL: ' prefix, or '' for other levels."""
        if record.levelno == logging.WARNING:
            return colors.yellow(record.levelname) + ': '
        if record.levelno == logging.ERROR:
            return colors.red(record.levelname) + ': '
        return ''

    def format(self, record):
        # Decode bytes messages to text before formatting.
        if isinstance(record.msg, six.binary_type):
            record.msg = record.msg.decode('utf-8')
        formatted = super(ConsoleWarningFormatter, self).format(record)
        return '{0}{1}'.format(self.get_level_message(record), formatted)
diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py
new file mode 100644
index 00000000..60bba8da
--- /dev/null
+++ b/compose/cli/log_printer.py
@@ -0,0 +1,250 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import sys
+from collections import namedtuple
+from itertools import cycle
+from threading import Thread
+
+from docker.errors import APIError
+from six.moves import _thread as thread
+from six.moves.queue import Empty
+from six.moves.queue import Queue
+
+from . import colors
+from compose import utils
+from compose.cli.signals import ShutdownException
+from compose.utils import split_buffer
+
+
class LogPresenter(object):
    """Render one container's log lines with an aligned, colored prefix."""

    def __init__(self, prefix_width, color_func):
        self.prefix_width = prefix_width
        self.color_func = color_func

    def present(self, container, line):
        """Return ``line`` prefixed with the container's padded name."""
        padded = container.name_without_project.ljust(self.prefix_width)
        colored_prefix = self.color_func(padded + ' |')
        return '{prefix} {line}'.format(prefix=colored_prefix, line=line)
+
+
def build_log_presenters(service_names, monochrome):
    """Return an iterable of functions.

    Each function can be used to format the logs output of a container.
    """
    prefix_width = max_name_width(service_names)

    if monochrome:
        color_funcs = cycle([lambda text: text])
    else:
        color_funcs = cycle(colors.rainbow())

    for color_func in color_funcs:
        yield LogPresenter(prefix_width, color_func)
+
+
def max_name_width(service_names, max_index_width=3):
    """Width of the longest service name plus room for the '_N' index, so
    log prefixes line up:

        db_1  | Listening
        web_1 | Listening
    """
    longest = max(len(name) for name in service_names)
    return longest + max_index_width
+
+
class LogPrinter(object):
    """Print logs from many containers to a single output stream."""

    def __init__(self,
                 containers,
                 presenters,
                 event_stream,
                 output=sys.stdout,
                 cascade_stop=False,
                 log_args=None):
        # containers: initial containers to tail
        # presenters: iterator of LogPresenter objects (one taken per container)
        # event_stream: engine events, used to pick up newly started containers
        # cascade_stop: when True, one container stopping ends the whole run
        self.containers = containers
        self.presenters = presenters
        self.event_stream = event_stream
        self.output = utils.get_output_stream(output)
        self.cascade_stop = cascade_stop
        self.log_args = log_args or {}

    def run(self):
        """Tail all containers until done; with cascade_stop, returns the
        name of the container that triggered the stop (for the exit code)."""
        if not self.containers:
            return

        queue = Queue()
        thread_args = queue, self.log_args
        # One tailer thread per container, plus a producer thread that
        # watches the event stream for new containers.
        thread_map = build_thread_map(self.containers, self.presenters, thread_args)
        start_producer_thread((
            thread_map,
            self.event_stream,
            self.presenters,
            thread_args))

        for line in consume_queue(queue, self.cascade_stop):
            remove_stopped_threads(thread_map)

            if self.cascade_stop:
                matching_container = [cont.name for cont in self.containers if cont.name == line]
                if line in matching_container:
                    # Returning the name of the container that started the
                    # the cascade_stop so we can return the correct exit code
                    return line

            if not line:
                if not thread_map:
                    # There are no running containers left to tail, so exit
                    return
                # We got an empty line because of a timeout, but there are still
                # active containers to tail, so continue
                continue

            self.write(line)

    def write(self, line):
        """Write one line, degrading to ASCII when the locale can't encode it."""
        try:
            self.output.write(line)
        except UnicodeEncodeError:
            # This may happen if the user's locale settings don't support UTF-8
            # and UTF-8 characters are present in the log line. The following
            # will output a "degraded" log with unsupported characters
            # replaced by `?`
            self.output.write(line.encode('ascii', 'replace').decode())
        self.output.flush()
+
+
def remove_stopped_threads(thread_map):
    """Drop entries whose tailer thread has finished."""
    finished = [
        container_id for container_id, tailer in list(thread_map.items())
        if not tailer.is_alive()
    ]
    for container_id in finished:
        thread_map.pop(container_id, None)
+
+
def build_thread(container, presenter, queue, log_args):
    """Start and return a daemon thread tailing ``container``'s logs."""
    worker = Thread(
        target=tail_container_logs,
        args=(container, presenter, queue, log_args))
    worker.daemon = True
    worker.start()
    return worker
+
+
def build_thread_map(initial_containers, presenters, thread_args):
    """Map container id -> tailer thread for the initial container set."""
    thread_map = {}
    for container in initial_containers:
        thread_map[container.id] = build_thread(
            container, next(presenters), *thread_args)
    return thread_map
+
+
class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')):
    """One message on the log queue: a log line, a stop marker, or an
    exception to re-raise in the consumer."""

    @classmethod
    def new(cls, item):
        # An ordinary log line.
        return cls(item, None, None)

    @classmethod
    def exception(cls, exc):
        # An exception raised by a producer thread.
        return cls(None, None, exc)

    @classmethod
    def stop(cls, item=None):
        # A producer finished; item optionally carries the container name.
        return cls(item, True, None)
+
+
def tail_container_logs(container, presenter, queue, log_args):
    """Producer: push presented log lines for ``container`` onto ``queue``."""
    generator = get_log_generator(container)

    try:
        for line in generator(container, log_args):
            queue.put(QueueItem.new(presenter.present(container, line)))
    except Exception as e:
        # Surface producer failures to the consumer thread.
        queue.put(QueueItem.exception(e))
        return

    if log_args.get('follow'):
        queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container))))
    queue.put(QueueItem.stop(container.name))
+
+
def get_log_generator(container):
    """Pick the log source: real API logs, or a warning-only stub."""
    if not container.has_api_logs:
        return build_no_log_generator
    return build_log_generator
+
+
def build_no_log_generator(container, log_args):
    """Return a generator that prints a warning about logs and waits for
    container to exit.
    """
    template = "WARNING: no logs are available with the '{}' log driver\n"
    yield template.format(container.log_driver)
+
+
def build_log_generator(container, log_args):
    """Stream the container's logs, attaching first when necessary."""
    stream = container.log_stream
    if stream is None:
        # No existing attachment: request a fresh log stream from the API.
        stream = container.logs(stdout=True, stderr=True, stream=True, **log_args)
    return split_buffer(stream)
+
+
def wait_on_exit(container):
    """Block until ``container`` exits; return a human-readable summary."""
    try:
        exit_code = container.wait()
    except APIError as e:
        return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
            container.name, e.response.status_code,
            e.response.text or '[empty]'
        )
    return "%s exited with code %s\n" % (container.name, exit_code)
+
+
def start_producer_thread(thread_args):
    """Spawn the daemon thread that watches engine events."""
    watcher = Thread(target=watch_events, args=thread_args)
    watcher.daemon = True
    watcher.start()
+
+
def watch_events(thread_map, event_stream, presenters, thread_args):
    """Keep ``thread_map`` in sync with container start/stop events."""
    for event in event_stream:
        container_id = event['id']

        if event['action'] == 'stop':
            thread_map.pop(container_id, None)

        if event['action'] != 'start':
            continue

        existing = thread_map.get(container_id)
        if existing is not None:
            if existing.is_alive():
                continue
            # The container restarted; replace its dead tailer thread.
            thread_map.pop(container_id, None)

        thread_map[container_id] = build_thread(
            event['container'], next(presenters), *thread_args)
+
+
def consume_queue(queue, cascade_stop):
    """Consume the queue by reading lines off of it and yielding them."""
    while True:
        try:
            item = queue.get(timeout=0.1)
        except Empty:
            # Timed out: yield None so the caller can decide to keep waiting.
            yield None
            continue
        except thread.error:
            # See https://github.com/docker/compose/issues/189
            raise ShutdownException()

        if item.exc:
            raise item.exc
        if item.is_stop and not cascade_stop:
            continue
        yield item.item
diff --git a/compose/cli/main.py b/compose/cli/main.py
new file mode 100644
index 00000000..face38e6
--- /dev/null
+++ b/compose/cli/main.py
@@ -0,0 +1,1297 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import contextlib
+import functools
+import json
+import logging
+import pipes
+import re
+import subprocess
+import sys
+from distutils.spawn import find_executable
+from inspect import getdoc
+from operator import attrgetter
+
+from . import errors
+from . import signals
+from .. import __version__
+from ..bundle import get_image_digests
+from ..bundle import MissingDigests
+from ..bundle import serialize_bundle
+from ..config import ConfigurationError
+from ..config import parse_environment
+from ..config import resolve_build_args
+from ..config.environment import Environment
+from ..config.serialize import serialize_config
+from ..config.types import VolumeSpec
+from ..const import COMPOSEFILE_V2_2 as V2_2
+from ..const import IS_WINDOWS_PLATFORM
+from ..errors import StreamParseError
+from ..progress_stream import StreamOutputError
+from ..project import NoSuchService
+from ..project import OneOffFilter
+from ..project import ProjectError
+from ..service import BuildAction
+from ..service import BuildError
+from ..service import ConvergenceStrategy
+from ..service import ImageType
+from ..service import NeedsBuildError
+from ..service import OperationFailedError
+from .command import get_config_from_options
+from .command import project_from_options
+from .docopt_command import DocoptDispatcher
+from .docopt_command import get_handler
+from .docopt_command import NoSuchCommand
+from .errors import UserError
+from .formatter import ConsoleWarningFormatter
+from .formatter import Formatter
+from .log_printer import build_log_presenters
+from .log_printer import LogPrinter
+from .utils import get_version_info
+from .utils import human_readable_file_size
+from .utils import yesno
+
+
if not IS_WINDOWS_PLATFORM:
    # dockerpty drives the interactive terminal for `run`/`exec`; it is
    # only importable off Windows (presumably it needs a POSIX pty —
    # confirm), hence the platform guard.
    from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation

# Module-level logger plus the shared stderr handler that
# setup_console_handler() configures once CLI options are known.
log = logging.getLogger(__name__)
console_handler = logging.StreamHandler(sys.stderr)
+
+
def main():
    """CLI entry point: run the dispatched command, mapping every known
    failure mode onto a log message and a non-zero exit status."""
    signals.ignore_sigpipe()
    try:
        command = dispatch()
        command()
    except (KeyboardInterrupt, signals.ShutdownException):
        log.error("Aborting.")
        sys.exit(1)
    except (UserError, NoSuchService, ConfigurationError,
            ProjectError, OperationFailedError) as e:
        log.error(e.msg)
        sys.exit(1)
    except BuildError as e:
        log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
        sys.exit(1)
    except StreamOutputError as e:
        log.error(e)
        sys.exit(1)
    except NeedsBuildError as e:
        log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
        sys.exit(1)
    except NoSuchCommand as e:
        # Show the "Commands:" section of the top-level docstring as a hint.
        commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
        log.error("No such command: %s\n\n%s", e.command, commands)
        sys.exit(1)
    except (errors.ConnectionError, StreamParseError):
        # NOTE(review): exits without a message here — presumably these
        # report their own details where they are raised; confirm.
        sys.exit(1)
+
+
def dispatch():
    """Parse sys.argv, finish logging setup, and return a zero-argument
    callable that executes the selected sub-command."""
    setup_logging()
    cli = DocoptDispatcher(
        TopLevelCommand,
        {'options_first': True, 'version': get_version_info('compose')})

    options, handler, command_options = cli.parse(sys.argv[1:])
    no_ansi = options.get('--no-ansi')
    setup_console_handler(console_handler, options.get('--verbose'), no_ansi)
    setup_parallel_logger(no_ansi)
    if no_ansi:
        # ANSI-free output implies monochrome output for the sub-command.
        command_options['--no-color'] = True
    return functools.partial(perform_command, options, handler, command_options)
+
+
def perform_command(options, handler, command_options):
    """Invoke *handler* with the right receiver for the chosen COMMAND."""
    command_name = options['COMMAND']

    if command_name in ('help', 'version'):
        # These never need a compose file or a docker connection.
        handler(command_options)
        return

    if command_name in ('config', 'bundle'):
        # These build their own project from the raw options.
        command = TopLevelCommand(None)
        handler(command, options, command_options)
        return

    project = project_from_options('.', options)
    command = TopLevelCommand(project)
    with errors.handle_connection_errors(project.client):
        handler(command, command_options)
+
+
def setup_logging():
    """Attach the shared stderr handler to the root logger.

    The root level is DEBUG; per-record filtering happens on the handler,
    which setup_console_handler() later tunes to INFO or DEBUG.
    """
    root = logging.getLogger()
    root.addHandler(console_handler)
    root.setLevel(logging.DEBUG)

    # Disable requests logging
    logging.getLogger("requests").propagate = False
+
+
def setup_parallel_logger(noansi):
    """Switch the parallel-operation status writer to plain output."""
    if not noansi:
        return
    # NOTE(review): imported lazily — presumably to avoid an import cycle
    # at module load time; confirm.
    import compose.parallel
    compose.parallel.ParallelStreamWriter.set_noansi()
+
+
def setup_console_handler(handler, verbose, noansi=False):
    """Pick the formatter and level for the stderr *handler*.

    Colourised warnings are used only when writing to a real TTY and ANSI
    output has not been disabled.
    """
    use_color = handler.stream.isatty() and noansi is False
    format_class = ConsoleWarningFormatter if use_color else logging.Formatter

    if verbose:
        # Verbose mode prefixes every record with its origin.
        handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
        handler.setLevel(logging.DEBUG)
    else:
        handler.setFormatter(format_class())
        handler.setLevel(logging.INFO)
+
+
# stolen from docopt master
def parse_doc_section(name, source):
    """Return every docstring section whose header line contains *name*.

    A section is the header line plus all immediately following indented
    lines; the header is matched case-insensitively.
    """
    section_re = re.compile(
        '^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
        re.IGNORECASE | re.MULTILINE,
    )
    return [section.strip() for section in section_re.findall(source)]
+
+
+class TopLevelCommand(object):
+ """Define and run multi-container applications with Docker.
+
+ Usage:
+ docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
+ docker-compose -h|--help
+
+ Options:
+ -f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
+ -p, --project-name NAME Specify an alternate project name (default: directory name)
+ --verbose Show more output
+ --no-ansi Do not print ANSI control characters
+ -v, --version Print version and exit
+ -H, --host HOST Daemon socket to connect to
+
+ --tls Use TLS; implied by --tlsverify
+ --tlscacert CA_PATH Trust certs signed only by this CA
+ --tlscert CLIENT_CERT_PATH Path to TLS certificate file
+ --tlskey TLS_KEY_PATH Path to TLS key file
+ --tlsverify Use TLS and verify the remote
+ --skip-hostname-check Don't check the daemon's hostname against the name specified
+ in the client certificate (for example if your docker host
+ is an IP address)
+ --project-directory PATH Specify an alternate working directory
+ (default: the path of the Compose file)
+
+ Commands:
+ build Build or rebuild services
+ bundle Generate a Docker bundle from the Compose file
+ config Validate and view the Compose file
+ create Create services
+ down Stop and remove containers, networks, images, and volumes
+ events Receive real time events from containers
+ exec Execute a command in a running container
+ help Get help on a command
+ images List images
+ kill Kill containers
+ logs View output from containers
+ pause Pause services
+ port Print the public port for a port binding
+ ps List containers
+ pull Pull service images
+ push Push service images
+ restart Restart services
+ rm Remove stopped containers
+ run Run a one-off command
+ scale Set number of containers for a service
+ start Start services
+ stop Stop services
+ top Display the running processes
+ unpause Unpause services
+ up Create and start containers
+ version Show the Docker-Compose version information
+ """
+
+ def __init__(self, project, project_dir='.'):
+ self.project = project
+ self.project_dir = '.'
+
+ def build(self, options):
+ """
+ Build or rebuild services.
+
+ Services are built once and then tagged as `project_service`,
+ e.g. `composetest_db`. If you change a service's `Dockerfile` or the
+ contents of its build directory, you can run `docker-compose build` to rebuild it.
+
+ Usage: build [options] [--build-arg key=val...] [SERVICE...]
+
+ Options:
+ --force-rm Always remove intermediate containers.
+ --no-cache Do not use cache when building the image.
+ --pull Always attempt to pull a newer version of the image.
+ --build-arg key=val Set build-time variables for one service.
+ """
+ service_names = options['SERVICE']
+ build_args = options.get('--build-arg', None)
+ if build_args:
+ environment = Environment.from_env_file(self.project_dir)
+ build_args = resolve_build_args(build_args, environment)
+
+ if not service_names and build_args:
+ raise UserError("Need service name for --build-arg option")
+
+ self.project.build(
+ service_names=service_names,
+ no_cache=bool(options.get('--no-cache', False)),
+ pull=bool(options.get('--pull', False)),
+ force_rm=bool(options.get('--force-rm', False)),
+ build_args=build_args)
+
+ def bundle(self, config_options, options):
+ """
+ Generate a Distributed Application Bundle (DAB) from the Compose file.
+
+ Images must have digests stored, which requires interaction with a
+ Docker registry. If digests aren't stored for all images, you can fetch
+ them with `docker-compose pull` or `docker-compose push`. To push images
+ automatically when bundling, pass `--push-images`. Only services with
+ a `build` option specified will have their images pushed.
+
+ Usage: bundle [options]
+
+ Options:
+ --push-images Automatically push images for any services
+ which have a `build` option specified.
+
+ -o, --output PATH Path to write the bundle file to.
+ Defaults to "<project name>.dab".
+ """
+ self.project = project_from_options('.', config_options)
+ compose_config = get_config_from_options(self.project_dir, config_options)
+
+ output = options["--output"]
+ if not output:
+ output = "{}.dab".format(self.project.name)
+
+ image_digests = image_digests_for_project(self.project, options['--push-images'])
+
+ with open(output, 'w') as f:
+ f.write(serialize_bundle(compose_config, image_digests))
+
+ log.info("Wrote bundle to {}".format(output))
+
+ def config(self, config_options, options):
+ """
+ Validate and view the Compose file.
+
+ Usage: config [options]
+
+ Options:
+ --resolve-image-digests Pin image tags to digests.
+ -q, --quiet Only validate the configuration, don't print
+ anything.
+ --services Print the service names, one per line.
+ --volumes Print the volume names, one per line.
+
+ """
+
+ compose_config = get_config_from_options(self.project_dir, config_options)
+ image_digests = None
+
+ if options['--resolve-image-digests']:
+ self.project = project_from_options('.', config_options)
+ image_digests = image_digests_for_project(self.project)
+
+ if options['--quiet']:
+ return
+
+ if options['--services']:
+ print('\n'.join(service['name'] for service in compose_config.services))
+ return
+
+ if options['--volumes']:
+ print('\n'.join(volume for volume in compose_config.volumes))
+ return
+
+ print(serialize_config(compose_config, image_digests))
+
+ def create(self, options):
+ """
+ Creates containers for a service.
+ This command is deprecated. Use the `up` command with `--no-start` instead.
+
+ Usage: create [options] [SERVICE...]
+
+ Options:
+ --force-recreate Recreate containers even if their configuration and
+ image haven't changed. Incompatible with --no-recreate.
+ --no-recreate If containers already exist, don't recreate them.
+ Incompatible with --force-recreate.
+ --no-build Don't build an image, even if it's missing.
+ --build Build images before creating containers.
+ """
+ service_names = options['SERVICE']
+
+ log.warn(
+ 'The create command is deprecated. '
+ 'Use the up command with the --no-start flag instead.'
+ )
+
+ self.project.create(
+ service_names=service_names,
+ strategy=convergence_strategy_from_opts(options),
+ do_build=build_action_from_opts(options),
+ )
+
+ def down(self, options):
+ """
+ Stops containers and removes containers, networks, volumes, and images
+ created by `up`.
+
+ By default, the only things removed are:
+
+ - Containers for services defined in the Compose file
+ - Networks defined in the `networks` section of the Compose file
+ - The default network, if one is used
+
+ Networks and volumes defined as `external` are never removed.
+
+ Usage: down [options]
+
+ Options:
+ --rmi type Remove images. Type must be one of:
+ 'all': Remove all images used by any service.
+ 'local': Remove only images that don't have a custom tag
+ set by the `image` field.
+ -v, --volumes Remove named volumes declared in the `volumes` section
+ of the Compose file and anonymous volumes
+ attached to containers.
+ --remove-orphans Remove containers for services not defined in the
+ Compose file
+ """
+ image_type = image_type_from_opt('--rmi', options['--rmi'])
+ self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
+
+ def events(self, options):
+ """
+ Receive real time events from containers.
+
+ Usage: events [options] [SERVICE...]
+
+ Options:
+ --json Output events as a stream of json objects
+ """
+ def format_event(event):
+ attributes = ["%s=%s" % item for item in event['attributes'].items()]
+ return ("{time} {type} {action} {id} ({attrs})").format(
+ attrs=", ".join(sorted(attributes)),
+ **event)
+
+ def json_format_event(event):
+ event['time'] = event['time'].isoformat()
+ event.pop('container')
+ return json.dumps(event)
+
+ for event in self.project.events():
+ formatter = json_format_event if options['--json'] else format_event
+ print(formatter(event))
+ sys.stdout.flush()
+
    def exec_command(self, options):
        """
        Execute a command in a running container

        Usage: exec [options] SERVICE COMMAND [ARGS...]

        Options:
            -d                Detached mode: Run command in the background.
            --privileged      Give extended privileges to the process.
            -u, --user USER   Run the command as this user.
            -T                Disable pseudo-tty allocation. By default `docker-compose exec`
                              allocates a TTY.
            --index=index     index of the container if there are multiple
                              instances of a service [default: 1]
        """
        index = int(options.get('--index'))
        service = self.project.get_service(options['SERVICE'])
        detach = options['-d']

        try:
            container = service.get_container(number=index)
        except ValueError as e:
            # No container with that index for the service; surface the
            # message as a user-facing error.
            raise UserError(str(e))
        command = [options['COMMAND']] + options['ARGS']
        tty = not options["-T"]

        if IS_WINDOWS_PLATFORM and not detach:
            # dockerpty is unavailable on Windows (see module imports), so
            # delegate the interactive exec to the docker CLI binary,
            # translating our flags to `docker exec` arguments.
            args = ["exec"]

            if options["-d"]:
                args += ["--detach"]
            else:
                args += ["--interactive"]

            if not options["-T"]:
                args += ["--tty"]

            if options["--privileged"]:
                args += ["--privileged"]

            if options["--user"]:
                args += ["--user", options["--user"]]

            args += [container.id]
            args += command

            # Exit with whatever status the docker binary returned.
            sys.exit(call_docker(args))

        create_exec_options = {
            "privileged": options["--privileged"],
            "user": options["--user"],
            "tty": tty,
            "stdin": tty,
        }

        exec_id = container.create_exec(command, **create_exec_options)

        if detach:
            # Fire and forget: start the exec and return immediately.
            container.start_exec(exec_id, tty=tty, stream=True)
            return

        # Interactive path: run the exec under a pseudo-terminal and exit
        # with the exec'd process's own exit code.
        signals.set_signal_handler_to_shutdown()
        try:
            operation = ExecOperation(
                self.project.client,
                exec_id,
                interactive=tty,
            )
            pty = PseudoTerminal(self.project.client, operation)
            pty.start()
        except signals.ShutdownException:
            log.info("received shutdown exception: closing")
        exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode")
        sys.exit(exit_code)
+
+ @classmethod
+ def help(cls, options):
+ """
+ Get help on a command.
+
+ Usage: help [COMMAND]
+ """
+ if options['COMMAND']:
+ subject = get_handler(cls, options['COMMAND'])
+ else:
+ subject = cls
+
+ print(getdoc(subject))
+
+ def images(self, options):
+ """
+ List images used by the created containers.
+ Usage: images [options] [SERVICE...]
+
+ Options:
+ -q Only display IDs
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
+
+ if options['-q']:
+ for image in set(c.image for c in containers):
+ print(image.split(':')[1])
+ else:
+ headers = [
+ 'Container',
+ 'Repository',
+ 'Tag',
+ 'Image Id',
+ 'Size'
+ ]
+ rows = []
+ for container in containers:
+ image_config = container.image_config
+ repo_tags = image_config['RepoTags'][0].rsplit(':', 1)
+ image_id = image_config['Id'].split(':')[1][:12]
+ size = human_readable_file_size(image_config['Size'])
+ rows.append([
+ container.name,
+ repo_tags[0],
+ repo_tags[1],
+ image_id,
+ size
+ ])
+ print(Formatter().table(headers, rows))
+
+ def kill(self, options):
+ """
+ Force stop service containers.
+
+ Usage: kill [options] [SERVICE...]
+
+ Options:
+ -s SIGNAL SIGNAL to send to the container.
+ Default signal is SIGKILL.
+ """
+ signal = options.get('-s', 'SIGKILL')
+
+ self.project.kill(service_names=options['SERVICE'], signal=signal)
+
+ def logs(self, options):
+ """
+ View output from containers.
+
+ Usage: logs [options] [SERVICE...]
+
+ Options:
+ --no-color Produce monochrome output.
+ -f, --follow Follow log output.
+ -t, --timestamps Show timestamps.
+ --tail="all" Number of lines to show from the end of the logs
+ for each container.
+ """
+ containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
+
+ tail = options['--tail']
+ if tail is not None:
+ if tail.isdigit():
+ tail = int(tail)
+ elif tail != 'all':
+ raise UserError("tail flag must be all or a number")
+ log_args = {
+ 'follow': options['--follow'],
+ 'tail': tail,
+ 'timestamps': options['--timestamps']
+ }
+ print("Attaching to", list_containers(containers))
+ log_printer_from_project(
+ self.project,
+ containers,
+ options['--no-color'],
+ log_args,
+ event_stream=self.project.events(service_names=options['SERVICE'])).run()
+
+ def pause(self, options):
+ """
+ Pause services.
+
+ Usage: pause [SERVICE...]
+ """
+ containers = self.project.pause(service_names=options['SERVICE'])
+ exit_if(not containers, 'No containers to pause', 1)
+
+ def port(self, options):
+ """
+ Print the public port for a port binding.
+
+ Usage: port [options] SERVICE PRIVATE_PORT
+
+ Options:
+ --protocol=proto tcp or udp [default: tcp]
+ --index=index index of the container if there are multiple
+ instances of a service [default: 1]
+ """
+ index = int(options.get('--index'))
+ service = self.project.get_service(options['SERVICE'])
+ try:
+ container = service.get_container(number=index)
+ except ValueError as e:
+ raise UserError(str(e))
+ print(container.get_local_port(
+ options['PRIVATE_PORT'],
+ protocol=options.get('--protocol') or 'tcp') or '')
+
+ def ps(self, options):
+ """
+ List containers.
+
+ Usage: ps [options] [SERVICE...]
+
+ Options:
+ -q Only display IDs
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
+
+ if options['-q']:
+ for container in containers:
+ print(container.id)
+ else:
+ headers = [
+ 'Name',
+ 'Command',
+ 'State',
+ 'Ports',
+ ]
+ rows = []
+ for container in containers:
+ command = container.human_readable_command
+ if len(command) > 30:
+ command = '%s ...' % command[:26]
+ rows.append([
+ container.name,
+ command,
+ container.human_readable_state,
+ container.human_readable_ports,
+ ])
+ print(Formatter().table(headers, rows))
+
+ def pull(self, options):
+ """
+ Pulls images for services defined in a Compose file, but does not start the containers.
+
+ Usage: pull [options] [SERVICE...]
+
+ Options:
+ --ignore-pull-failures Pull what it can and ignores images with pull failures.
+ --parallel Pull multiple images in parallel.
+ --quiet Pull without printing progress information
+ """
+ self.project.pull(
+ service_names=options['SERVICE'],
+ ignore_pull_failures=options.get('--ignore-pull-failures'),
+ parallel_pull=options.get('--parallel'),
+ silent=options.get('--quiet'),
+ )
+
+ def push(self, options):
+ """
+ Pushes images for services.
+
+ Usage: push [options] [SERVICE...]
+
+ Options:
+ --ignore-push-failures Push what it can and ignores images with push failures.
+ """
+ self.project.push(
+ service_names=options['SERVICE'],
+ ignore_push_failures=options.get('--ignore-push-failures')
+ )
+
+ def rm(self, options):
+ """
+ Removes stopped service containers.
+
+ By default, anonymous volumes attached to containers will not be removed. You
+ can override this with `-v`. To list all volumes, use `docker volume ls`.
+
+ Any data which is not in a volume will be lost.
+
+ Usage: rm [options] [SERVICE...]
+
+ Options:
+ -f, --force Don't ask to confirm removal
+ -s, --stop Stop the containers, if required, before removing
+ -v Remove any anonymous volumes attached to containers
+ -a, --all Deprecated - no effect.
+ """
+ if options.get('--all'):
+ log.warn(
+ '--all flag is obsolete. This is now the default behavior '
+ 'of `docker-compose rm`'
+ )
+ one_off = OneOffFilter.include
+
+ if options.get('--stop'):
+ self.project.stop(service_names=options['SERVICE'], one_off=one_off)
+
+ all_containers = self.project.containers(
+ service_names=options['SERVICE'], stopped=True, one_off=one_off
+ )
+ stopped_containers = [c for c in all_containers if not c.is_running]
+
+ if len(stopped_containers) > 0:
+ print("Going to remove", list_containers(stopped_containers))
+ if options.get('--force') \
+ or yesno("Are you sure? [yN] ", default=False):
+ self.project.remove_stopped(
+ service_names=options['SERVICE'],
+ v=options.get('-v', False),
+ one_off=one_off
+ )
+ else:
+ print("No stopped containers")
+
+ def run(self, options):
+ """
+ Run a one-off command on a service.
+
+ For example:
+
+ $ docker-compose run web python manage.py shell
+
+ By default, linked services will be started, unless they are already
+ running. If you do not want to start linked services, use
+ `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
+
+ Usage: run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
+
+ Options:
+ -d Detached mode: Run container in the background, print
+ new container name.
+ --name NAME Assign a name to the container
+ --entrypoint CMD Override the entrypoint of the image.
+ -e KEY=VAL Set an environment variable (can be used multiple times)
+ -u, --user="" Run as specified username or uid
+ --no-deps Don't start linked services.
+ --rm Remove container after run. Ignored in detached mode.
+ -p, --publish=[] Publish a container's port(s) to the host
+ --service-ports Run command with the service's ports enabled and mapped
+ to the host.
+ -v, --volume=[] Bind mount a volume (default [])
+ -T Disable pseudo-tty allocation. By default `docker-compose run`
+ allocates a TTY.
+ -w, --workdir="" Working directory inside the container
+ """
+ service = self.project.get_service(options['SERVICE'])
+ detach = options['-d']
+
+ if options['--publish'] and options['--service-ports']:
+ raise UserError(
+ 'Service port mapping and manual port mapping '
+ 'can not be used together'
+ )
+
+ if options['COMMAND'] is not None:
+ command = [options['COMMAND']] + options['ARGS']
+ elif options['--entrypoint'] is not None:
+ command = []
+ else:
+ command = service.options.get('command')
+
+ container_options = build_container_options(options, detach, command)
+ run_one_off_container(container_options, self.project, service, options)
+
+ def scale(self, options):
+ """
+ Set number of containers to run for a service.
+
+ Numbers are specified in the form `service=num` as arguments.
+ For example:
+
+ $ docker-compose scale web=2 worker=3
+
+ This command is deprecated. Use the up command with the `--scale` flag
+ instead.
+
+ Usage: scale [options] [SERVICE=NUM...]
+
+ Options:
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
+ """
+ timeout = timeout_from_opts(options)
+
+ if self.project.config_version == V2_2:
+ raise UserError(
+ 'The scale command is incompatible with the v2.2 format. '
+ 'Use the up command with the --scale flag instead.'
+ )
+ else:
+ log.warn(
+ 'The scale command is deprecated. '
+ 'Use the up command with the --scale flag instead.'
+ )
+
+ for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
+ self.project.get_service(service_name).scale(num, timeout=timeout)
+
+ def start(self, options):
+ """
+ Start existing containers.
+
+ Usage: start [SERVICE...]
+ """
+ containers = self.project.start(service_names=options['SERVICE'])
+ exit_if(not containers, 'No containers to start', 1)
+
+ def stop(self, options):
+ """
+ Stop running containers without removing them.
+
+ They can be started again with `docker-compose start`.
+
+ Usage: stop [options] [SERVICE...]
+
+ Options:
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
+ """
+ timeout = timeout_from_opts(options)
+ self.project.stop(service_names=options['SERVICE'], timeout=timeout)
+
+ def restart(self, options):
+ """
+ Restart running containers.
+
+ Usage: restart [options] [SERVICE...]
+
+ Options:
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
+ """
+ timeout = timeout_from_opts(options)
+ containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
+ exit_if(not containers, 'No containers to restart', 1)
+
+ def top(self, options):
+ """
+ Display the running processes
+
+ Usage: top [SERVICE...]
+
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=False) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name')
+ )
+
+ for idx, container in enumerate(containers):
+ if idx > 0:
+ print()
+
+ top_data = self.project.client.top(container.name)
+ headers = top_data.get("Titles")
+ rows = []
+
+ for process in top_data.get("Processes", []):
+ rows.append(process)
+
+ print(container.name)
+ print(Formatter().table(headers, rows))
+
+ def unpause(self, options):
+ """
+ Unpause services.
+
+ Usage: unpause [SERVICE...]
+ """
+ containers = self.project.unpause(service_names=options['SERVICE'])
+ exit_if(not containers, 'No containers to unpause', 1)
+
    def up(self, options):
        """
        Builds, (re)creates, starts, and attaches to containers for a service.

        Unless they are already running, this command also starts any linked services.

        The `docker-compose up` command aggregates the output of each container. When
        the command exits, all containers are stopped. Running `docker-compose up -d`
        starts the containers in the background and leaves them running.

        If there are existing containers for a service, and the service's configuration
        or image was changed after the container's creation, `docker-compose up` picks
        up the changes by stopping and recreating the containers (preserving mounted
        volumes). To prevent Compose from picking up changes, use the `--no-recreate`
        flag.

        If you want to force Compose to stop and recreate all containers, use the
        `--force-recreate` flag.

        Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]

        Options:
            -d                         Detached mode: Run containers in the background,
                                       print new container names.
                                       Incompatible with --abort-on-container-exit.
            --no-color                 Produce monochrome output.
            --no-deps                  Don't start linked services.
            --force-recreate           Recreate containers even if their configuration
                                       and image haven't changed.
                                       Incompatible with --no-recreate.
            --no-recreate              If containers already exist, don't recreate them.
                                       Incompatible with --force-recreate.
            --no-build                 Don't build an image, even if it's missing.
            --no-start                 Don't start the services after creating them.
            --build                    Build images before starting containers.
            --abort-on-container-exit  Stops all containers if any container was stopped.
                                       Incompatible with -d.
            -t, --timeout TIMEOUT      Use this timeout in seconds for container shutdown
                                       when attached or when containers are already
                                       running. (default: 10)
            --remove-orphans           Remove containers for services not
                                       defined in the Compose file
            --exit-code-from SERVICE   Return the exit code of the selected service container.
                                       Implies --abort-on-container-exit.
            --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the `scale`
                                       setting in the Compose file if present.
        """
        start_deps = not options['--no-deps']
        # Note: exitval_from_opts may mutate options (it forces
        # --abort-on-container-exit on), so it runs before cascade_stop is read.
        exit_value_from = exitval_from_opts(options, self.project)
        cascade_stop = options['--abort-on-container-exit']
        service_names = options['SERVICE']
        timeout = timeout_from_opts(options)
        remove_orphans = options['--remove-orphans']
        detached = options.get('-d')
        no_start = options.get('--no-start')

        # Flag combinations that make no sense are rejected up front.
        if detached and (cascade_stop or exit_value_from):
            raise UserError("--abort-on-container-exit and -d cannot be combined.")

        if no_start:
            for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
                if options.get(excluded):
                    raise UserError('--no-start and {} cannot be combined.'.format(excluded))

        with up_shutdown_context(self.project, service_names, timeout, detached):
            to_attach = self.project.up(
                service_names=service_names,
                start_deps=start_deps,
                strategy=convergence_strategy_from_opts(options),
                do_build=build_action_from_opts(options),
                timeout=timeout,
                detached=detached,
                remove_orphans=remove_orphans,
                scale_override=parse_scale_args(options['--scale']),
                start=not no_start
            )

            if detached or no_start:
                # Nothing to attach to; leave the containers running (or created).
                return

            # Only stream logs for the services the user actually named.
            attached_containers = filter_containers_to_service_names(to_attach, service_names)

            log_printer = log_printer_from_project(
                self.project,
                attached_containers,
                options['--no-color'],
                {'follow': True},
                cascade_stop,
                event_stream=self.project.events(service_names=service_names))
            print("Attaching to", list_containers(log_printer.containers))
            # run() returns the name of the container whose exit triggered
            # the cascade (if any).
            cascade_starter = log_printer.run()

            if cascade_stop:
                print("Aborting on container exit...")
                all_containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
                exit_code = compute_exit_code(
                    exit_value_from, attached_containers, cascade_starter, all_containers
                )

                self.project.stop(service_names=service_names, timeout=timeout)
                sys.exit(exit_code)
+
+ @classmethod
+ def version(cls, options):
+ """
+ Show version informations
+
+ Usage: version [--short]
+
+ Options:
+ --short Shows only Compose's version number.
+ """
+ if options['--short']:
+ print(__version__)
+ else:
+ print(get_version_info('full'))
+
+
def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
    """Determine the exit status for `up --abort-on-container-exit`.

    When --exit-code-from names a service, mirror that service's container
    exit code; otherwise report the code of the container whose exit
    triggered the cascade (*cascade_starter*).

    Returns 0 on success, 2 when the requested service ran no containers,
    or the relevant container's non-zero exit code.
    """
    if exit_value_from:
        candidates = [
            c for c in attached_containers if c.service == exit_value_from
        ]
        if not candidates:
            log.error(
                'No containers matching the spec "{0}" '
                'were run.'.format(exit_value_from)
            )
            return 2
        if len(candidates) > 1:
            # Bug fix: `filter()` is a lazy iterator on Python 3, so the
            # original `exit_values[0]` raised TypeError; it also raised
            # IndexError when every replica exited 0.
            exit_values = [
                c.inspect()['State']['ExitCode'] for c in candidates
            ]
            non_zero = [code for code in exit_values if code != 0]
            return non_zero[0] if non_zero else 0
        return candidates[0].inspect()['State']['ExitCode']

    # No --exit-code-from: look for the stopped container that started the
    # cascade and propagate its non-zero exit code, if any.
    for container in all_containers:
        if not container.is_running and cascade_starter == container.name:
            if container.exit_code != 0:
                return container.exit_code
            break

    return 0
+
+
def convergence_strategy_from_opts(options):
    """Map --force-recreate/--no-recreate onto a ConvergenceStrategy."""
    force = options['--force-recreate']
    skip = options['--no-recreate']
    if force and skip:
        raise UserError("--force-recreate and --no-recreate cannot be combined.")

    if force:
        return ConvergenceStrategy.always
    if skip:
        return ConvergenceStrategy.never
    return ConvergenceStrategy.changed
+
+
def timeout_from_opts(options):
    """Return --timeout as an int, or None when the flag was not given."""
    raw = options.get('--timeout')
    if raw is None:
        return None
    return int(raw)
+
+
def image_digests_for_project(project, allow_push=False):
    """Resolve a registry digest for every service image in *project*.

    When some images have no stored digest, MissingDigests is translated
    into a UserError whose message tells the user exactly which
    `docker-compose push`/`pull` command would fix it.
    """
    with errors.handle_connection_errors(project.client):
        try:
            return get_image_digests(
                project,
                allow_push=allow_push
            )
        except MissingDigests as e:
            def list_images(images):
                # One indented image name per line, sorted for stable output.
                return "\n".join("    {}".format(name) for name in sorted(images))

            paras = ["Some images are missing digests."]

            if e.needs_push:
                command_hint = (
                    "Use `docker-compose push {}` to push them. "
                    .format(" ".join(sorted(e.needs_push)))
                )
                paras += [
                    "The following images can be pushed:",
                    list_images(e.needs_push),
                    command_hint,
                ]

            if e.needs_pull:
                command_hint = (
                    "Use `docker-compose pull {}` to pull them. "
                    .format(" ".join(sorted(e.needs_pull)))
                )

                paras += [
                    "The following images need to be pulled:",
                    list_images(e.needs_pull),
                    command_hint,
                ]

            raise UserError("\n\n".join(paras))
+
+
def exitval_from_opts(options, project):
    """Validate --exit-code-from and force --abort-on-container-exit on.

    Exits the process with status 2 when the named service does not exist.
    Note: mutates *options* as a side effect.
    """
    exit_value_from = options.get('--exit-code-from')
    if not exit_value_from:
        return exit_value_from

    if not options.get('--abort-on-container-exit'):
        log.warn('using --exit-code-from implies --abort-on-container-exit')
        options['--abort-on-container-exit'] = True

    known_services = [s.name for s in project.get_services()]
    if exit_value_from not in known_services:
        log.error('No service named "%s" was found in your compose file.',
                  exit_value_from)
        sys.exit(2)
    return exit_value_from
+
+
def image_type_from_opt(flag, value):
    """Translate an image-removal flag value into an ImageType member.

    An empty value means "remove nothing"; anything other than the enum
    member names ("all", "local") is reported as a user error.
    """
    if not value:
        return ImageType.none
    try:
        return ImageType[value]
    except KeyError:
        raise UserError("%s flag must be one of: all, local" % flag)
+
+
def build_action_from_opts(options):
    """Map the mutually exclusive --build/--no-build flags to a BuildAction."""
    if options['--build'] and options['--no-build']:
        raise UserError("--build and --no-build can not be combined.")

    if options['--build']:
        return BuildAction.force

    if options['--no-build']:
        return BuildAction.skip

    return BuildAction.none
+
+
def build_container_options(options, detach, command):
    """Translate `docker-compose run` CLI options into container options.

    A TTY is only allocated for attached runs that are interactive (-T not
    given) and have a real terminal on stdin.
    """
    container_options = {
        'command': command,
        'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
        'stdin_open': not detach,
        'detach': detach,
    }

    if options['-e']:
        container_options['environment'] = Environment.from_command_line(
            parse_environment(options['-e'])
        )

    if options['--entrypoint']:
        container_options['entrypoint'] = options.get('--entrypoint')

    if options['--rm']:
        # A container that is removed afterwards must not auto-restart.
        container_options['restart'] = None

    if options['--user']:
        container_options['user'] = options.get('--user')

    if not options['--service-ports']:
        # Drop the service's port mappings to avoid clashing with the
        # regular containers of the same service.
        container_options['ports'] = []

    if options['--publish']:
        container_options['ports'] = options.get('--publish')

    if options['--name']:
        container_options['name'] = options['--name']

    if options['--workdir']:
        container_options['working_dir'] = options['--workdir']

    if options['--volume']:
        volumes = [VolumeSpec.parse(i) for i in options['--volume']]
        container_options['volumes'] = volumes

    return container_options
+
+
def run_one_off_container(container_options, project, service, options):
    """Create and run a single one-off container for `docker-compose run`.

    Dependencies are started first (unless --no-deps). With -d the container
    is started and its name printed; otherwise a pseudo-terminal is attached
    and the process exits with the container's exit code. A first Ctrl-C
    stops the container (exit 1); a second one kills it and exits with 2.
    """
    if not options['--no-deps']:
        deps = service.get_dependency_names()
        if deps:
            project.up(
                service_names=deps,
                start_deps=True,
                strategy=ConvergenceStrategy.never,
                rescale=False
            )

    project.initialize()

    container = service.create_container(
        quiet=True,
        one_off=True,
        **container_options)

    if options['-d']:
        service.start_container(container)
        print(container.name)
        return

    def remove_container(force=False):
        # NOTE(review): the `force` parameter is ignored -- removal is always
        # forced (force=True below). Confirm whether that is intentional.
        if options['--rm']:
            project.client.remove_container(container.id, force=True, v=True)

    signals.set_signal_handler_to_shutdown()
    try:
        try:
            if IS_WINDOWS_PLATFORM:
                # No PTY support on Windows: delegate the attach to the
                # docker CLI binary instead.
                service.connect_container_to_networks(container)
                exit_code = call_docker(["start", "--attach", "--interactive", container.id])
            else:
                operation = RunOperation(
                    project.client,
                    container.id,
                    interactive=not options['-T'],
                    logs=False,
                )
                pty = PseudoTerminal(project.client, operation)
                # Sockets must be acquired before the container starts so no
                # early output is missed.
                sockets = pty.sockets()
                service.start_container(container)
                pty.start(sockets)
                exit_code = container.wait()
        except signals.ShutdownException:
            # First Ctrl-C: stop gracefully and report a failure exit code.
            project.client.stop(container.id)
            exit_code = 1
    except signals.ShutdownException:
        # Second Ctrl-C while stopping: kill, clean up, and bail out.
        project.client.kill(container.id)
        remove_container(force=True)
        sys.exit(2)

    remove_container()
    sys.exit(exit_code)
+
+
def log_printer_from_project(
    project,
    containers,
    monochrome,
    log_args,
    cascade_stop=False,
    event_stream=None,
):
    """Build a LogPrinter for *containers* that follows project events.

    *event_stream* may be supplied to reuse an existing stream; otherwise a
    fresh one is opened from the project.
    """
    return LogPrinter(
        containers,
        build_log_presenters(project.service_names, monochrome),
        event_stream or project.events(),
        cascade_stop=cascade_stop,
        log_args=log_args)
+
+
def filter_containers_to_service_names(containers, service_names):
    """Return only the containers that belong to *service_names*.

    An empty or falsy *service_names* acts as "no filter" and returns the
    input unchanged.
    """
    if not service_names:
        return containers

    matches = []
    for candidate in containers:
        if candidate.service in service_names:
            matches.append(candidate)
    return matches
+
+
@contextlib.contextmanager
def up_shutdown_context(project, service_names, timeout, detached):
    """Context manager handling Ctrl-C around an attached `up`.

    In detached mode it is a no-op. Otherwise the first ShutdownException
    (SIGINT/SIGTERM) triggers a graceful stop of the services; a second one
    raised during that stop kills them and exits with status 2.
    """
    if detached:
        yield
        return

    signals.set_signal_handler_to_shutdown()
    try:
        try:
            yield
        except signals.ShutdownException:
            print("Gracefully stopping... (press Ctrl+C again to force)")
            project.stop(service_names=service_names, timeout=timeout)
    except signals.ShutdownException:
        project.kill(service_names=service_names)
        sys.exit(2)
+
+
def list_containers(containers):
    """Return a comma-separated string of the containers' names."""
    names = [container.name for container in containers]
    return ", ".join(names)
+
+
def exit_if(condition, message, exit_code):
    """Log *message* and exit with *exit_code* when *condition* is truthy."""
    if condition:
        log.error(message)
        raise SystemExit(exit_code)
+
+
def call_docker(args):
    """Run the `docker` CLI binary with *args* and return its exit status.

    Raises UserError when no docker binary can be found on PATH.
    """
    executable_path = find_executable('docker')
    if not executable_path:
        raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))

    args = [executable_path] + args
    # Log the fully shell-quoted command line for debugging.
    log.debug(" ".join(map(pipes.quote, args)))

    return subprocess.call(args)
+
+
+def parse_scale_args(options):
+ res = {}
+ for s in options:
+ if '=' not in s:
+ raise UserError('Arguments to scale should be in the form service=num')
+ service_name, num = s.split('=', 1)
+ try:
+ num = int(num)
+ except ValueError:
+ raise UserError(
+ 'Number of containers for service "%s" is not a number' % service_name
+ )
+ res[service_name] = num
+ return res
diff --git a/compose/cli/signals.py b/compose/cli/signals.py
new file mode 100644
index 00000000..9b360c44
--- /dev/null
+++ b/compose/cli/signals.py
@@ -0,0 +1,30 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import signal
+
+from ..const import IS_WINDOWS_PLATFORM
+
+
class ShutdownException(Exception):
    """Raised by the signal handler to request a graceful shutdown."""
    pass
+
+
def shutdown(signal, frame):
    """Signal handler: convert SIGINT/SIGTERM into ShutdownException.

    The `signal` parameter deliberately follows the handler signature and
    shadows the `signal` module inside this function.
    """
    raise ShutdownException()
+
+
def set_signal_handler(handler):
    """Install *handler* for both SIGINT and SIGTERM."""
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
+
+
def set_signal_handler_to_shutdown():
    """Make SIGINT/SIGTERM raise ShutdownException in the main thread."""
    set_signal_handler(shutdown)
+
+
def ignore_sigpipe():
    """Restore the default SIGPIPE behaviour (POSIX only).

    Python installs a handler that turns SIGPIPE into an exception;
    resetting to SIG_DFL avoids raising when our output pipe is closed.
    SIGPIPE does not exist on Windows, so this is a no-op there.
    """
    if not IS_WINDOWS_PLATFORM:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
diff --git a/compose/cli/utils.py b/compose/cli/utils.py
new file mode 100644
index 00000000..4d4fc4c1
--- /dev/null
+++ b/compose/cli/utils.py
@@ -0,0 +1,150 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import unicode_literals
+
+import math
+import os
+import platform
+import ssl
+import subprocess
+import sys
+
+import docker
+
+import compose
+from ..const import IS_WINDOWS_PLATFORM
+
+# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
+# defining it as OSError (its parent class) if missing.
+try:
+ WindowsError
+except NameError:
+ WindowsError = OSError
+
+
def yesno(prompt, default=None):
    """
    Prompt the user for a yes or no.

    Can optionally specify a default value, which will only be
    used if they enter a blank line.

    Unrecognised input (anything other than "y", "n", "yes",
    "no" or "") will return None.
    """
    # Uses this module's `input` wrapper, which flushes stdout first.
    answer = input(prompt).strip().lower()

    if answer == "y" or answer == "yes":
        return True
    elif answer == "n" or answer == "no":
        return False
    elif answer == "":
        return default
    else:
        return None
+
+
def input(prompt):
    """
    Version of input (raw_input in Python 2) which forces a flush of sys.stdout
    to avoid problems where the prompt fails to appear due to line buffering
    """
    # NOTE: intentionally shadows the builtin `input` within this module.
    sys.stdout.write(prompt)
    sys.stdout.flush()
    return sys.stdin.readline().rstrip('\n')
+
+
def call_silently(*args, **kwargs):
    """
    Like subprocess.call(), but redirects stdout and stderr to /dev/null.

    Returns the subprocess exit status, or 1 when the call itself fails
    with a WindowsError.
    """
    with open(os.devnull, 'w') as shutup:
        try:
            return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs)
        except WindowsError:
            # On Windows, subprocess.call() can still raise exceptions. Normalize
            # to POSIXy behaviour by returning a nonzero exit code.
            return 1
+
+
def is_mac():
    """Return True when the current platform is macOS (Darwin)."""
    system_name = platform.system()
    return system_name == 'Darwin'
+
+
def is_ubuntu():
    """Best-effort check for an Ubuntu host."""
    # NOTE(review): platform.linux_distribution() was deprecated in Python 3.5
    # and removed in 3.8, where this raises AttributeError -- confirm the
    # supported interpreter range.
    return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'
+
+
def is_windows():
    """Return True when running on Windows (delegates to IS_WINDOWS_PLATFORM)."""
    return IS_WINDOWS_PLATFORM
+
+
def get_version_info(scope):
    """Return a version banner for the given *scope*.

    'compose' yields just the docker-compose version/build line; 'full'
    adds docker-py, Python implementation and OpenSSL versions. Any other
    scope raises ValueError.
    """
    versioninfo = 'docker-compose version {}, build {}'.format(
        compose.__version__,
        get_build_version())

    if scope == 'compose':
        return versioninfo
    if scope == 'full':
        return (
            "{}\n"
            "docker-py version: {}\n"
            "{} version: {}\n"
            "OpenSSL version: {}"
        ).format(
            versioninfo,
            docker.version,
            platform.python_implementation(),
            platform.python_version(),
            ssl.OPENSSL_VERSION)

    raise ValueError("{} is not a valid version scope".format(scope))
+
+
def get_build_version():
    """Return the git SHA shipped with the package, or 'unknown'.

    Release builds write the SHA to a GITSHA file next to the `compose`
    package; development checkouts typically lack it.
    """
    filename = os.path.join(os.path.dirname(compose.__file__), 'GITSHA')
    if not os.path.exists(filename):
        return 'unknown'

    with open(filename) as fh:
        return fh.read().strip()
+
+
def is_docker_for_mac_installed():
    """Return True on macOS when the Docker.app bundle is present."""
    return is_mac() and os.path.isdir('/Applications/Docker.app')
+
+
def generate_user_agent():
    """Build the HTTP User-Agent string sent to the Docker daemon.

    The platform part is best-effort: platform lookups may raise IOError
    (e.g. in restricted environments) and are then simply omitted.
    """
    parts = [
        "docker-compose/{}".format(compose.__version__),
        "docker-py/{}".format(docker.__version__),
    ]
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        pass
    else:
        parts.append("{}/{}".format(p_system, p_release))
    return " ".join(parts)
+
+
def unquote_path(s):
    """Strip one pair of surrounding double quotes from *s*, if present."""
    if not s:
        return s
    if s.startswith('"') and s.endswith('"'):
        return s[1:-1]
    return s
+
+
def human_readable_file_size(size):
    """Format a byte count using binary-scaled units (e.g. 2048 -> '2 kB')."""
    units = ('B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB')
    if size:
        magnitude = int(math.log(size, 2) / 10)
    else:
        magnitude = 0
    # Clamp to the largest unit we can name.
    magnitude = min(magnitude, len(units) - 1)
    scaled = size / float(1 << (magnitude * 10))
    return '{0:.3g} {1}'.format(scaled, units[magnitude])
diff --git a/compose/cli/verbose_proxy.py b/compose/cli/verbose_proxy.py
new file mode 100644
index 00000000..b1592eab
--- /dev/null
+++ b/compose/cli/verbose_proxy.py
@@ -0,0 +1,60 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import functools
+import logging
+import pprint
+from itertools import chain
+
+import six
+
+
def format_call(args, kwargs):
    """Render positional and keyword arguments as a call-site string,
    e.g. "(1, name='x')"."""
    args = (repr(a) for a in args)
    kwargs = ("{0!s}={1!r}".format(*item) for item in six.iteritems(kwargs))
    return "({0})".format(", ".join(chain(args, kwargs)))
+
+
def format_return(result, max_lines):
    """Summarize a return value for logging.

    Lists/tuples/sets are reduced to a type-and-count summary; other truthy
    values are pretty-printed and truncated to *max_lines*; falsy values
    pass through unchanged.
    """
    if isinstance(result, (list, tuple, set)):
        return "({0} with {1} items)".format(type(result).__name__, len(result))

    if not result:
        return result

    rendered = pprint.pformat(result).split('\n')
    suffix = '\n...' if len(rendered) > max_lines else ''
    return '\n'.join(rendered[:max_lines]) + suffix
+
+
class VerboseProxy(object):
    """Proxy all function calls to another class and log method name, arguments
    and return values for each call.
    """

    def __init__(self, obj_name, obj, log_name=None, max_lines=10):
        # max_lines caps how much of a pretty-printed return value is logged.
        self.obj_name = obj_name
        self.obj = obj
        self.max_lines = max_lines
        self.log = logging.getLogger(log_name or __name__)

    def __getattr__(self, name):
        # Non-callable attributes pass straight through; callables are
        # wrapped so the actual call goes via proxy_callable() for logging.
        attr = getattr(self.obj, name)

        if not six.callable(attr):
            return attr

        return functools.partial(self.proxy_callable, name)

    def proxy_callable(self, call_name, *args, **kwargs):
        """Log the call, invoke it on the wrapped object, log the (truncated)
        result and return it."""
        self.log.info("%s %s <- %s",
                      self.obj_name,
                      call_name,
                      format_call(args, kwargs))

        result = getattr(self.obj, call_name)(*args, **kwargs)
        self.log.info("%s %s -> %s",
                      self.obj_name,
                      call_name,
                      format_return(result, self.max_lines))
        return result
diff --git a/compose/config/__init__.py b/compose/config/__init__.py
new file mode 100644
index 00000000..b629edf6
--- /dev/null
+++ b/compose/config/__init__.py
@@ -0,0 +1,12 @@
+# flake8: noqa
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from . import environment
+from .config import ConfigurationError
+from .config import DOCKER_CONFIG_KEYS
+from .config import find
+from .config import load
+from .config import merge_environment
+from .config import parse_environment
+from .config import resolve_build_args
diff --git a/compose/config/config.py b/compose/config/config.py
new file mode 100644
index 00000000..d5aaf953
--- /dev/null
+++ b/compose/config/config.py
@@ -0,0 +1,1306 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import functools
+import logging
+import os
+import string
+import sys
+from collections import namedtuple
+
+import six
+import yaml
+from cached_property import cached_property
+
+from . import types
+from .. import const
+from ..const import COMPOSEFILE_V1 as V1
+from ..const import COMPOSEFILE_V2_1 as V2_1
+from ..const import COMPOSEFILE_V3_0 as V3_0
+from ..const import COMPOSEFILE_V3_4 as V3_4
+from ..utils import build_string_dict
+from ..utils import parse_bytes
+from ..utils import parse_nanoseconds_int
+from ..utils import splitdrive
+from ..version import ComposeVersion
+from .environment import env_vars_from_file
+from .environment import Environment
+from .environment import split_env
+from .errors import CircularReference
+from .errors import ComposeFileNotFound
+from .errors import ConfigurationError
+from .errors import DuplicateOverrideFileFound
+from .errors import VERSION_EXPLANATION
+from .interpolation import interpolate_environment_variables
+from .sort_services import get_container_name_from_network_mode
+from .sort_services import get_service_name_from_network_mode
+from .sort_services import sort_service_dicts
+from .types import parse_extra_hosts
+from .types import parse_restart_spec
+from .types import ServiceLink
+from .types import ServicePort
+from .types import VolumeFromSpec
+from .types import VolumeSpec
+from .validation import match_named_volumes
+from .validation import validate_against_config_schema
+from .validation import validate_config_section
+from .validation import validate_cpu
+from .validation import validate_depends_on
+from .validation import validate_extends_file_path
+from .validation import validate_links
+from .validation import validate_network_mode
+from .validation import validate_pid_mode
+from .validation import validate_service_constraints
+from .validation import validate_top_level_object
+from .validation import validate_ulimits
+
+
# Service configuration keys that map directly onto options understood by
# the Docker engine when creating containers.
DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'command',
    'cpu_count',
    'cpu_percent',
    'cpu_quota',
    'cpu_shares',
    'cpus',
    'cpuset',
    'detach',
    'devices',
    'dns',
    'dns_search',
    'dns_opt',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'group_add',
    'hostname',
    'healthcheck',
    'image',
    'ipc',
    'labels',
    'links',
    'mac_address',
    'mem_limit',
    'mem_reservation',
    'memswap_limit',
    'mem_swappiness',
    'net',
    'oom_score_adj',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'secrets',
    'security_opt',
    'shm_size',
    'pids_limit',
    'stdin_open',
    'stop_signal',
    'sysctls',
    'tty',
    'user',
    'userns_mode',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]

# Additional keys that are valid in a compose file but are interpreted by
# Compose itself rather than passed straight to the Docker engine.
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'blkio_config',
    'build',
    'container_name',
    'credential_spec',
    'dockerfile',
    'log_driver',
    'log_opt',
    'logging',
    'network_mode',
    'init',
    'scale',
]

# A `build` value starting with one of these prefixes is treated as a
# remote build context rather than a local path.
DOCKER_VALID_URL_PREFIXES = (
    'http://',
    'https://',
    'git://',
    'github.com/',
    'git@',
)

# File names probed (in this order) when no -f option is given.
SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
]

DEFAULT_OVERRIDE_FILENAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml')


log = logging.getLogger(__name__)
+
+
class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')):
    """
    :param working_dir: the directory to use for relative paths in the config
    :type working_dir: string
    :param config_files: list of configuration files to load
    :type config_files: list of :class:`ConfigFile`
    :param environment: computed environment values for this project
    :type environment: :class:`environment.Environment`
    """
    def __new__(cls, working_dir, config_files, environment=None):
        # Default to the environment built from working_dir's env file.
        if environment is None:
            environment = Environment.from_env_file(working_dir)
        return super(ConfigDetails, cls).__new__(
            cls, working_dir, config_files, environment
        )
+
+
class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
    """
    :param filename: filename of the config file
    :type filename: string
    :param config: contents of the config file
    :type config: :class:`dict`
    """

    @classmethod
    def from_filename(cls, filename):
        """Parse *filename* as YAML and wrap it in a ConfigFile."""
        return cls(filename, load_yaml(filename))

    @cached_property
    def version(self):
        """Return the compose file format version as a ComposeVersion.

        A missing `version` key means V1. The bare strings '2' and '3' are
        normalised to 2.0/3.0. The string '1' is rejected: V1 files must not
        declare a version at all.
        """
        if 'version' not in self.config:
            return V1

        version = self.config['version']

        if isinstance(version, dict):
            # In a V1 file the top level is a service mapping, so `version`
            # may legitimately be the name of a service.
            log.warn('Unexpected type for "version" key in "{}". Assuming '
                     '"version" is the name of a service, and defaulting to '
                     'Compose file version 1.'.format(self.filename))
            return V1

        if not isinstance(version, six.string_types):
            raise ConfigurationError(
                'Version in "{}" is invalid - it should be a string.'
                .format(self.filename))

        if version == '1':
            raise ConfigurationError(
                'Version in "{}" is invalid. {}'
                .format(self.filename, VERSION_EXPLANATION)
            )

        if version == '2':
            return const.COMPOSEFILE_V2_0

        if version == '3':
            return const.COMPOSEFILE_V3_0

        return ComposeVersion(version)

    def get_service(self, name):
        # Raises KeyError when the service does not exist.
        return self.get_service_dicts()[name]

    def get_service_dicts(self):
        # V1 files are a bare mapping of services at the top level.
        return self.config if self.version == V1 else self.config.get('services', {})

    def get_volumes(self):
        return {} if self.version == V1 else self.config.get('volumes', {})

    def get_networks(self):
        return {} if self.version == V1 else self.config.get('networks', {})

    def get_secrets(self):
        # Secrets were introduced in file format 3.1.
        return {} if self.version < const.COMPOSEFILE_V3_1 else self.config.get('secrets', {})

    def get_configs(self):
        # Configs were introduced in file format 3.3.
        return {} if self.version < const.COMPOSEFILE_V3_3 else self.config.get('configs', {})
+
+
class Config(namedtuple('_Config', 'version services volumes networks secrets configs')):
    """
    :param version: configuration version
    :type version: :class:`ComposeVersion`
    :param services: List of service description dictionaries
    :type services: :class:`list`
    :param volumes: Dictionary mapping volume names to description dictionaries
    :type volumes: :class:`dict`
    :param networks: Dictionary mapping network names to description dictionaries
    :type networks: :class:`dict`
    :param secrets: Dictionary mapping secret names to description dictionaries
    :type secrets: :class:`dict`
    :param configs: Dictionary mapping config names to description dictionaries
    :type configs: :class:`dict`
    """
+
+
class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
    """A single service's raw configuration plus where it was defined."""

    @classmethod
    def with_abs_paths(cls, working_dir, filename, name, config):
        """Build a ServiceConfig with working_dir and filename made absolute.

        *filename* may be falsy (config read from stdin) and is then kept
        as-is.
        """
        if not working_dir:
            raise ValueError("No working_dir for ServiceConfig.")

        return cls(
            os.path.abspath(working_dir),
            os.path.abspath(filename) if filename else filename,
            name,
            config)
+
+
def find(base_dir, filenames, environment, override_dir=None):
    """Locate and parse the compose files, returning ConfigDetails.

    `filenames == ['-']` reads a single config document from stdin. When no
    filenames are given, the default files are discovered by walking up
    from *base_dir*.
    """
    if filenames == ['-']:
        return ConfigDetails(
            os.path.abspath(override_dir) if override_dir else os.getcwd(),
            [ConfigFile(None, yaml.safe_load(sys.stdin))],
            environment
        )

    if filenames:
        filenames = [os.path.join(base_dir, f) for f in filenames]
    else:
        filenames = get_default_config_files(base_dir)

    log.debug("Using configuration files: {}".format(",".join(filenames)))
    return ConfigDetails(
        override_dir if override_dir else os.path.dirname(filenames[0]),
        [ConfigFile.from_filename(f) for f in filenames],
        environment
    )
+
+
def validate_config_version(config_files):
    """Check structure of every file and that all versions match the first."""
    main_file = config_files[0]
    validate_top_level_object(main_file)
    for next_file in config_files[1:]:
        validate_top_level_object(next_file)

        if main_file.version != next_file.version:
            raise ConfigurationError(
                "Version mismatch: file {0} specifies version {1} but "
                "extension file {2} uses version {3}".format(
                    main_file.filename,
                    main_file.version,
                    next_file.filename,
                    next_file.version))
+
+
def get_default_config_files(base_dir):
    """Return the default compose file plus its override file, if any.

    Searches *base_dir* and its parents for the supported file names and
    warns (then picks the first) when both .yml and .yaml spellings exist.
    """
    (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)

    if not candidates:
        raise ComposeFileNotFound(SUPPORTED_FILENAMES)

    winner = candidates[0]

    if len(candidates) > 1:
        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warn("Using %s\n", winner)

    return [os.path.join(path, winner)] + get_default_override_file(path)
+
+
def get_default_override_file(path):
    """Return a one-element list with the override file in *path*, or [].

    Having both the .yml and .yaml override spellings present is ambiguous
    and raises DuplicateOverrideFileFound.
    """
    override_files_in_path = [os.path.join(path, override_filename) for override_filename
                              in DEFAULT_OVERRIDE_FILENAMES
                              if os.path.exists(os.path.join(path, override_filename))]
    if len(override_files_in_path) > 1:
        raise DuplicateOverrideFileFound(override_files_in_path)
    return override_files_in_path
+
+
def find_candidates_in_parent_dirs(filenames, path):
    """
    Given a directory path to start, looks for filenames in the
    directory, and then each parent directory successively,
    until found.

    Returns tuple (candidates, path).
    """
    candidates = [
        name for name in filenames
        if os.path.exists(os.path.join(path, name))
    ]
    if candidates:
        return (candidates, path)

    # Nothing here: try the parent, unless we have reached the root.
    parent_dir = os.path.join(path, '..')
    if os.path.abspath(parent_dir) != os.path.abspath(path):
        return find_candidates_in_parent_dirs(filenames, parent_dir)
    return (candidates, path)
+
+
def check_swarm_only_config(service_dicts):
    """Warn about service keys that only `docker stack deploy` honours."""
    warning_template = (
        "Some services ({services}) use the '{key}' key, which will be ignored. "
        "Compose does not support '{key}' configuration - use "
        "`docker stack deploy` to deploy to a swarm."
    )

    def check_swarm_only_key(service_dicts, key):
        # Collect the offending services and emit a single combined warning.
        services = [s for s in service_dicts if s.get(key)]
        if services:
            log.warn(
                warning_template.format(
                    services=", ".join(sorted(s['name'] for s in services)),
                    key=key
                )
            )

    for swarm_key in ('deploy', 'credential_spec', 'configs'):
        check_swarm_only_key(service_dicts, swarm_key)
+
+
def load(config_details):
    """Load the configuration from a working directory and a list of
    configuration files. Files are loaded in order, and merged on top
    of each other to create the final configuration.

    Return a fully interpolated, extended and validated configuration.
    """
    validate_config_version(config_details.config_files)

    # Interpolate environment variables before any further processing.
    processed_files = [
        process_config_file(config_file, config_details.environment)
        for config_file in config_details.config_files
    ]
    config_details = config_details._replace(config_files=processed_files)

    main_file = config_details.config_files[0]
    volumes = load_mapping(
        config_details.config_files, 'get_volumes', 'Volume'
    )
    networks = load_mapping(
        config_details.config_files, 'get_networks', 'Network'
    )
    secrets = load_mapping(
        config_details.config_files, 'get_secrets', 'Secret', config_details.working_dir
    )
    configs = load_mapping(
        config_details.config_files, 'get_configs', 'Config', config_details.working_dir
    )
    service_dicts = load_services(config_details, main_file)

    if main_file.version != V1:
        # Named volumes referenced by services must be declared top-level.
        for service_dict in service_dicts:
            match_named_volumes(service_dict, volumes)

    check_swarm_only_config(service_dicts)

    return Config(main_file.version, service_dicts, volumes, networks, secrets, configs)
+
+
def load_mapping(config_files, get_func, entity_type, working_dir=None):
    """Merge one top-level section (volumes/networks/secrets/configs).

    *get_func* is the name of the ConfigFile accessor to call (e.g.
    'get_volumes'); later files override earlier ones per entry name.
    """
    mapping = {}

    for config_file in config_files:
        for name, config in getattr(config_file, get_func)().items():
            mapping[name] = config or {}
            if not config:
                continue

            external = config.get('external')
            if external:
                name_field = 'name' if entity_type == 'Volume' else 'external_name'
                validate_external(entity_type, name, config, config_file.version)
                if isinstance(external, dict):
                    config[name_field] = external.get('name')
                elif not config.get('name'):
                    # Fall back to the entry's own key as the external name.
                    config[name_field] = name

            if 'driver_opts' in config:
                # Docker expects driver options as string values.
                config['driver_opts'] = build_string_dict(
                    config['driver_opts']
                )

            if 'labels' in config:
                config['labels'] = parse_labels(config['labels'])

            if 'file' in config:
                # Secret/config file paths are relative to the project dir.
                config['file'] = expand_path(working_dir, config['file'])

    return mapping
+
+
def validate_external(entity_type, name, config, version):
    """Reject extra attributes on `external:` entities where the file format
    forbids them (before 2.1, and from 3.0 up to but excluding 3.4)."""
    if (version < V2_1 or (version >= V3_0 and version < V3_4)) and len(config.keys()) > 1:
        raise ConfigurationError(
            "{} {} declared as external but specifies additional attributes "
            "({}).".format(
                entity_type, name, ', '.join(k for k in config if k != 'external')))
+
+
def load_services(config_details, config_file):
    """Build the final, dependency-sorted list of service dicts.

    The service sections of all files are merged pairwise in order, then
    each service is extended (`extends:`), validated and finalized.
    """
    def build_service(service_name, service_dict, service_names):
        # Resolve `extends`, then normalise, validate and finalize one service.
        service_config = ServiceConfig.with_abs_paths(
            config_details.working_dir,
            config_file.filename,
            service_name,
            service_dict)
        resolver = ServiceExtendsResolver(
            service_config, config_file, environment=config_details.environment
        )
        service_dict = process_service(resolver.run())

        service_config = service_config._replace(config=service_dict)
        validate_service(service_config, service_names, config_file)
        service_dict = finalize_service(
            service_config,
            service_names,
            config_file.version,
            config_details.environment)
        return service_dict

    def build_services(service_config):
        service_names = service_config.keys()
        return sort_service_dicts([
            build_service(name, service_dict, service_names)
            for name, service_dict in service_config.items()
        ])

    def merge_services(base, override):
        # Union of service names; each service's dicts merged per-key.
        all_service_names = set(base) | set(override)
        return {
            name: merge_service_dicts_from_files(
                base.get(name, {}),
                override.get(name, {}),
                config_file.version)
            for name in all_service_names
        }

    service_configs = [
        file.get_service_dicts() for file in config_details.config_files
    ]

    service_config = service_configs[0]
    for next_config in service_configs[1:]:
        service_config = merge_services(service_config, next_config)

    return build_services(service_config)
+
+
def interpolate_config_section(config_file, config, section, environment):
    """Validate a top-level section and interpolate ${VAR} references."""
    validate_config_section(config_file.filename, config, section)
    return interpolate_environment_variables(
        config_file.version,
        config,
        section,
        environment
    )
+
+
def process_config_file(config_file, environment, service_name=None):
    """Interpolate and schema-validate one config file.

    Only the sections supported by the file's format version are processed.
    When *service_name* is given (extends resolution), its absence from the
    file is an error.
    """
    services = interpolate_config_section(
        config_file,
        config_file.get_service_dicts(),
        'service',
        environment)

    if config_file.version > V1:
        processed_config = dict(config_file.config)
        processed_config['services'] = services
        processed_config['volumes'] = interpolate_config_section(
            config_file,
            config_file.get_volumes(),
            'volume',
            environment)
        processed_config['networks'] = interpolate_config_section(
            config_file,
            config_file.get_networks(),
            'network',
            environment)
        if config_file.version >= const.COMPOSEFILE_V3_1:
            processed_config['secrets'] = interpolate_config_section(
                config_file,
                config_file.get_secrets(),
                'secrets',
                environment)
        if config_file.version >= const.COMPOSEFILE_V3_3:
            processed_config['configs'] = interpolate_config_section(
                config_file,
                config_file.get_configs(),
                'configs',
                environment
            )
    else:
        # V1 files are just the services mapping at top level.
        processed_config = services

    config_file = config_file._replace(config=processed_config)
    validate_against_config_schema(config_file)

    if service_name and service_name not in services:
        raise ConfigurationError(
            "Cannot extend service '{}' in {}: Service not found".format(
                service_name, config_file.filename))

    return config_file
+
+
class ServiceExtendsResolver(object):
    """Resolve the `extends` option for one service, recursively.

    A list of (filename, service) signatures already visited is carried
    down the recursion to detect circular `extends` chains.
    """
    def __init__(self, service_config, config_file, environment, already_seen=None):
        self.service_config = service_config
        self.working_dir = service_config.working_dir
        self.already_seen = already_seen or []
        self.config_file = config_file
        self.environment = environment

    @property
    def signature(self):
        # Uniquely identifies this service within its file, for cycle checks.
        return self.service_config.filename, self.service_config.name

    def detect_cycle(self):
        if self.signature in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature])

    def run(self):
        """Return the service config with any `extends` chain merged in."""
        self.detect_cycle()

        if 'extends' in self.service_config.config:
            service_dict = self.resolve_extends(*self.validate_and_construct_extends())
            return self.service_config._replace(config=service_dict)

        return self.service_config

    def validate_and_construct_extends(self):
        """Locate the extended service's raw config.

        Returns (config_path, service_config, service_name). When the target
        lives in another file, that file is loaded, version-checked and
        processed first.
        """
        extends = self.service_config.config['extends']
        if not isinstance(extends, dict):
            # Shorthand form: `extends: other` == `extends: {service: other}`.
            extends = {'service': extends}

        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        if config_path == self.config_file.filename:
            try:
                service_config = self.config_file.get_service(service_name)
            except KeyError:
                raise ConfigurationError(
                    "Cannot extend service '{}' in {}: Service not found".format(
                        service_name, config_path)
                )
        else:
            extends_file = ConfigFile.from_filename(config_path)
            validate_config_version([self.config_file, extends_file])
            extended_file = process_config_file(
                extends_file, self.environment, service_name=service_name
            )
            service_config = extended_file.get_service(service_name)

        return config_path, service_config, service_name

    def resolve_extends(self, extended_config_path, service_dict, service_name):
        """Recursively resolve the base service, then merge self on top."""
        resolver = ServiceExtendsResolver(
            ServiceConfig.with_abs_paths(
                os.path.dirname(extended_config_path),
                extended_config_path,
                service_name,
                service_dict),
            self.config_file,
            already_seen=self.already_seen + [self.signature],
            environment=self.environment
        )

        service_config = resolver.run()
        other_service_dict = process_service(service_config)
        validate_extended_service_dict(
            other_service_dict,
            extended_config_path,
            service_name)

        return merge_service_dicts(
            other_service_dict,
            self.service_config.config,
            self.config_file.version)

    def get_extended_config_path(self, extends_options):
        """Service we are extending either has a value for 'file' set, which we
        need to obtain a full path too or we are extending from a service
        defined in our own file.
        """
        filename = self.service_config.filename
        validate_extends_file_path(
            self.service_config.name,
            extends_options,
            filename)
        if 'file' in extends_options:
            return expand_path(self.working_dir, extends_options['file'])
        return filename
+
+
def resolve_environment(service_dict, environment=None):
    """Unpack any environment variables from an env_file, if set.
    Interpolate environment values if set.
    """
    env = {}
    for env_file in service_dict.get('env_file', []):
        env.update(env_vars_from_file(env_file))

    # Explicit `environment` entries take precedence over env_file values.
    env.update(parse_environment(service_dict.get('environment')))
    return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
+
+
def resolve_build_args(buildargs, environment):
    """Parse build args and resolve unset values from *environment*."""
    args = parse_build_arguments(buildargs)
    return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args))
+
+
def validate_extended_service_dict(service_dict, filename, service):
    """Reject `extends` targets carrying options that cannot be inherited.

    links, volumes_from, container/service network modes and depends_on all
    reference other services, which would be meaningless in the extending
    file's context.
    """
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)

    if 'links' in service_dict:
        raise ConfigurationError(
            "%s services with 'links' cannot be extended" % error_prefix)

    if 'volumes_from' in service_dict:
        raise ConfigurationError(
            "%s services with 'volumes_from' cannot be extended" % error_prefix)

    if 'net' in service_dict:
        if get_container_name_from_network_mode(service_dict['net']):
            raise ConfigurationError(
                "%s services with 'net: container' cannot be extended" % error_prefix)

    if 'network_mode' in service_dict:
        if get_service_name_from_network_mode(service_dict['network_mode']):
            raise ConfigurationError(
                "%s services with 'network_mode: service' cannot be extended" % error_prefix)

    if 'depends_on' in service_dict:
        raise ConfigurationError(
            "%s services with 'depends_on' cannot be extended" % error_prefix)
+
+
def validate_service(service_config, service_names, config_file):
    """Run all per-service validations: schema constraints, path checks,
    cross-service references, and image-name capitalisation."""
    service_dict, service_name = service_config.config, service_config.name
    validate_service_constraints(service_dict, service_name, config_file)
    validate_paths(service_dict)

    validate_cpu(service_config)
    validate_ulimits(service_config)
    validate_network_mode(service_config, service_names)
    validate_pid_mode(service_config, service_names)
    validate_depends_on(service_config, service_names)
    validate_links(service_config, service_names)

    if not service_dict.get('image') and has_uppercase(service_name):
        # Without an explicit image, the service name becomes part of the
        # image name, which Docker requires to be lowercase.
        raise ConfigurationError(
            "Service '{name}' contains uppercase characters which are not valid "
            "as part of an image name. Either use a lowercase service name or "
            "use the `image` field to set a custom name for the service image."
            .format(name=service_name))
+
+
def process_service(service_config):
    """Normalize one service definition and return a new service dict.

    Expands paths (env_file, build context, volumes) relative to the
    service's working directory and normalizes shorthand forms
    (ports, depends_on, healthcheck, blkio_config, scalar-or-list fields).

    :param service_config: config tuple with ``.working_dir``, ``.name``
        and ``.config`` (the raw service dict)
    :return: the processed service dict
    """
    working_dir = service_config.working_dir
    # Copy so the caller's dict is not mutated.
    service_dict = dict(service_config.config)

    if 'env_file' in service_dict:
        service_dict['env_file'] = [
            expand_path(working_dir, path)
            for path in to_list(service_dict['env_file'])
        ]

    if 'build' in service_dict:
        process_build_section(service_dict, working_dir)

    # With a custom volume driver, host "paths" are driver-specific and
    # must not be resolved against the local filesystem.
    if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
        service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)

    if 'sysctls' in service_dict:
        service_dict['sysctls'] = build_string_dict(parse_sysctls(service_dict['sysctls']))

    if 'labels' in service_dict:
        service_dict['labels'] = parse_labels(service_dict['labels'])

    service_dict = process_depends_on(service_dict)

    # Fields that accept either a scalar or a list are normalized to lists.
    for field in ['dns', 'dns_search', 'tmpfs']:
        if field in service_dict:
            service_dict[field] = to_list(service_dict[field])

    service_dict = process_blkio_config(process_ports(
        process_healthcheck(service_dict, service_config.name)
    ))

    return service_dict
+
+
def process_build_section(service_dict, working_dir):
    """Expand the build context path and normalize build labels in place."""
    build = service_dict['build']
    if isinstance(build, six.string_types):
        # Shorthand form: the string value is the build context itself.
        service_dict['build'] = resolve_build_path(working_dir, build)
    elif isinstance(build, dict):
        if 'context' in build:
            build['context'] = resolve_build_path(working_dir, build['context'])
        if 'labels' in build:
            build['labels'] = parse_labels(build['labels'])
+
+
def process_ports(service_dict):
    """Normalize every entry of `ports` into ServicePort objects."""
    if 'ports' not in service_dict:
        return service_dict

    normalized = []
    for entry in service_dict['ports']:
        # Entries may already be parsed (e.g. when merging processed dicts).
        normalized.extend(
            [entry] if isinstance(entry, ServicePort) else ServicePort.parse(entry)
        )
    service_dict['ports'] = normalized
    return service_dict
+
+
def process_depends_on(service_dict):
    """Normalize the short (list) form of `depends_on` to the dict form,
    defaulting each dependency to the service_started condition.
    """
    if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict):
        service_dict['depends_on'] = {
            svc: {'condition': 'service_started'}
            for svc in service_dict['depends_on']
        }
    return service_dict
+
+
def process_blkio_config(service_dict):
    """Normalize `blkio_config` rate values in place.

    Byte rates (device_read_bps / device_write_bps) are converted via
    parse_bytes (presumably accepting unit-suffixed strings — it returns
    None on failure, which is treated as an error below). IOPS rates must
    be integers or integer strings.

    :raises ConfigurationError: on unparseable rate values
    """
    if not service_dict.get('blkio_config'):
        return service_dict

    for field in ['device_read_bps', 'device_write_bps']:
        if field in service_dict['blkio_config']:
            for v in service_dict['blkio_config'].get(field, []):
                rate = v.get('rate', 0)
                v['rate'] = parse_bytes(rate)
                # parse_bytes signals failure with None.
                if v['rate'] is None:
                    raise ConfigurationError('Invalid format for bytes value: "{}"'.format(rate))

    for field in ['device_read_iops', 'device_write_iops']:
        if field in service_dict['blkio_config']:
            for v in service_dict['blkio_config'].get(field, []):
                try:
                    v['rate'] = int(v.get('rate', 0))
                except ValueError:
                    raise ConfigurationError(
                        'Invalid IOPS value: "{}". Must be a positive integer.'.format(v.get('rate'))
                    )

    return service_dict
+
+
def process_healthcheck(service_dict, service_name):
    """Normalize a service healthcheck: translate `disable` into the
    ['NONE'] test and convert duration strings to nanosecond integers.

    :raises ConfigurationError: if `disable: true` is combined with
        other healthcheck options
    """
    if 'healthcheck' not in service_dict:
        return service_dict

    raw = service_dict['healthcheck']
    normalized = {}

    if raw.get('disable'):
        # `disable: true` must stand alone.
        if len(raw) > 1:
            raise ConfigurationError(
                'Service "{}" defines an invalid healthcheck: '
                '"disable: true" cannot be combined with other options'
                .format(service_name))
        normalized['test'] = ['NONE']
    elif 'test' in raw:
        normalized['test'] = raw['test']

    for duration_field in ('interval', 'timeout', 'start_period'):
        if duration_field not in raw:
            continue
        value = raw[duration_field]
        # An integer means the conversion already happened earlier.
        if isinstance(value, six.integer_types):
            normalized[duration_field] = value
        else:
            normalized[duration_field] = parse_nanoseconds_int(value)

    if 'retries' in raw:
        normalized['retries'] = raw['retries']

    service_dict['healthcheck'] = normalized
    return service_dict
+
+
def finalize_service(service_config, service_names, version, environment):
    """Final conversion pass over a fully merged service dict.

    Resolves environment/env_file into a single mapping, parses volumes,
    volumes_from, networks, restart, secrets and configs into their typed
    forms, normalizes `build`, and translates legacy v1 fields.

    :param service_config: config tuple with ``.config``, ``.name`` and
        ``.working_dir``
    :param service_names: all service names, used to rewrite a
        ``net: container:<x>`` reference as ``network_mode: service:<x>``
        when ``<x>`` is a service
    :param version: config schema version (passed to VolumeFromSpec.parse)
    :param environment: environment used for variable resolution and the
        COMPOSE_CONVERT_WINDOWS_PATHS flag
    :return: the finalized service dict (a new dict; input is not mutated)
    """
    service_dict = dict(service_config.config)

    if 'environment' in service_dict or 'env_file' in service_dict:
        # env_file contents are folded into `environment`; the key is dropped.
        service_dict['environment'] = resolve_environment(service_dict, environment)
        service_dict.pop('env_file', None)

    if 'volumes_from' in service_dict:
        service_dict['volumes_from'] = [
            VolumeFromSpec.parse(vf, service_names, version)
            for vf in service_dict['volumes_from']
        ]

    if 'volumes' in service_dict:
        service_dict['volumes'] = [
            VolumeSpec.parse(
                v, environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
            ) for v in service_dict['volumes']
        ]

    if 'net' in service_dict:
        # v1 `net` becomes `network_mode`; a container reference that names
        # another service is rewritten to the `service:` form.
        network_mode = service_dict.pop('net')
        container_name = get_container_name_from_network_mode(network_mode)
        if container_name and container_name in service_names:
            service_dict['network_mode'] = 'service:{}'.format(container_name)
        else:
            service_dict['network_mode'] = network_mode

    if 'networks' in service_dict:
        service_dict['networks'] = parse_networks(service_dict['networks'])

    if 'restart' in service_dict:
        service_dict['restart'] = parse_restart_spec(service_dict['restart'])

    if 'secrets' in service_dict:
        service_dict['secrets'] = [
            types.ServiceSecret.parse(s) for s in service_dict['secrets']
        ]

    if 'configs' in service_dict:
        service_dict['configs'] = [
            types.ServiceConfig.parse(c) for c in service_dict['configs']
        ]

    normalize_build(service_dict, service_config.working_dir, environment)

    service_dict['name'] = service_config.name
    # v1 fields (log_driver/log_opt/dockerfile) are translated last.
    return normalize_v1_service_format(service_dict)
+
+
def normalize_v1_service_format(service_dict):
    """Translate v1-only keys (`log_driver`, `log_opt`, `dockerfile`)
    into their modern `logging`/`build` equivalents.
    """
    if 'log_driver' in service_dict or 'log_opt' in service_dict:
        logging_conf = service_dict.setdefault('logging', {})
        if 'log_driver' in service_dict:
            logging_conf['driver'] = service_dict.pop('log_driver')
        if 'log_opt' in service_dict:
            logging_conf['options'] = service_dict.pop('log_opt')

    if 'dockerfile' in service_dict:
        build_conf = service_dict.get('build', {})
        build_conf.update({'dockerfile': service_dict.pop('dockerfile')})
        service_dict['build'] = build_conf

    return service_dict
+
+
def merge_service_dicts_from_files(base, override, version):
    """Merge two definitions of a service coming from different files.

    Identical to `merge_service_dicts()` except that the `extends` field
    is carried over (override wins), since `extends` handling is not part
    of the normal merge.
    """
    merged = merge_service_dicts(base, override, version)
    for source in (override, base):
        if 'extends' in source:
            merged['extends'] = source['extends']
            break
    return merged
+
+
class MergeDict(dict):
    """A dict subclass that accumulates the merge of a base and an
    override mapping, one field at a time.
    """

    def __init__(self, base, override):
        self.base = base
        self.override = override

    def needs_merge(self, field):
        # A field participates in the merge if either side defines it.
        return field in self.base or field in self.override

    def merge_field(self, field, merge_func, default=None):
        """Merge one field with an arbitrary two-argument merge function."""
        if self.needs_merge(field):
            base_value = self.base.get(field, default)
            override_value = self.override.get(field, default)
            self[field] = merge_func(base_value, override_value)

    def merge_mapping(self, field, parse_func):
        """Merge a mapping-valued field; override keys win."""
        if not self.needs_merge(field):
            return
        merged = parse_func(self.base.get(field))
        merged.update(parse_func(self.override.get(field)))
        self[field] = merged

    def merge_sequence(self, field, parse_func):
        """Merge a sequence field of parseable items, keyed on each item's
        `merge_field` attribute; override items win. The result is the
        sorted list of each item's `repr()` form.
        """
        if not self.needs_merge(field):
            return

        def as_mapping(sequence):
            return to_mapping((parse_func(entry) for entry in sequence), 'merge_field')

        merged = as_mapping(self.base.get(field, []))
        merged.update(as_mapping(self.override.get(field, [])))
        self[field] = [entry.repr() for entry in sorted(merged.values())]

    def merge_scalar(self, field):
        """Take the override value when present, else the base value."""
        if self.needs_merge(field):
            self[field] = self.override.get(field, self.base.get(field))
+
+
def merge_service_dicts(base, override, version):
    """Merge two definitions of the same service (e.g. via `extends` or
    multiple compose files) into a single service dict.

    Mapping fields merge key-wise (override wins), sequence fields merge
    per-item, and everything else is treated as a scalar override.

    :param version: schema version; V1 has special image/build handling.
    :return: the merged service dict
    """
    md = MergeDict(base, override)

    md.merge_mapping('environment', parse_environment)
    md.merge_mapping('labels', parse_labels)
    md.merge_mapping('ulimits', parse_flat_dict)
    md.merge_mapping('networks', parse_networks)
    md.merge_mapping('sysctls', parse_sysctls)
    md.merge_mapping('depends_on', parse_depends_on)
    md.merge_sequence('links', ServiceLink.parse)
    md.merge_sequence('secrets', types.ServiceSecret.parse)
    md.merge_sequence('configs', types.ServiceConfig.parse)
    md.merge_mapping('deploy', parse_deploy)
    md.merge_mapping('extra_hosts', parse_extra_hosts)

    for field in ['volumes', 'devices']:
        md.merge_field(field, merge_path_mappings)

    for field in [
        'cap_add', 'cap_drop', 'expose', 'external_links',
        'security_opt', 'volumes_from',
    ]:
        md.merge_field(field, merge_unique_items_lists, default=[])

    for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
        md.merge_field(field, merge_list_or_string)

    md.merge_field('logging', merge_logging, default={})
    merge_ports(md, base, override)
    md.merge_field('blkio_config', merge_blkio_config, default={})
    md.merge_field('healthcheck', merge_healthchecks, default={})

    # Any known key not handled above merges as a plain scalar override.
    for field in set(ALLOWED_KEYS) - set(md):
        md.merge_scalar(field)

    # In v1 `image` and `build` are mutually exclusive: the override's
    # choice wins and removes the other (see legacy_v1_merge_image_or_build).
    if version == V1:
        legacy_v1_merge_image_or_build(md, base, override)
    elif md.needs_merge('build'):
        md['build'] = merge_build(md, base, override)

    return dict(md)
+
+
def merge_unique_items_lists(base, override):
    """Union of both lists, with items stringified, deduplicated and sorted."""
    stringified = set(str(item) for item in base)
    stringified.update(str(item) for item in override)
    return sorted(stringified)
+
+
def merge_healthchecks(base, override):
    """Merge two healthcheck mappings; override keys win.

    A healthcheck disabled in the override replaces the base entirely:
    inheriting options like `interval` from the base would produce an
    invalid combined healthcheck ("disable: true" cannot be combined
    with other options — see process_healthcheck).

    Note: the schema key is `disable`; `disabled` is also honoured
    because this function historically checked that spelling.
    """
    if override.get('disable') is True or override.get('disabled') is True:
        return override
    result = base.copy()
    result.update(override)
    return result
+
+
def merge_ports(md, base, override):
    """Merge the `ports` lists into `md`, keyed per parsed port
    (override wins), sorted by target port.
    """
    field = 'ports'
    if not md.needs_merge(field):
        return

    def as_mapping(entries):
        parsed = []
        for entry in entries:
            parsed.extend(ServicePort.parse(entry))
        return to_mapping(parsed, 'merge_field')

    merged = as_mapping(md.base.get(field, []))
    merged.update(as_mapping(md.override.get(field, [])))
    md[field] = sorted(merged.values(), key=lambda port: port.target)
+
+
def merge_build(output, base, override):
    """Merge two `build` sections, accepting the string shorthand for a
    bare build context.
    """
    def as_build_dict(service):
        conf = service.get('build', {})
        # String shorthand means "build context".
        return {'context': conf} if isinstance(conf, six.string_types) else conf

    md = MergeDict(as_build_dict(base), as_build_dict(override))
    for scalar in ('context', 'dockerfile', 'network', 'target', 'shm_size'):
        md.merge_scalar(scalar)
    md.merge_mapping('args', parse_build_arguments)
    md.merge_field('cache_from', merge_unique_items_lists, default=[])
    md.merge_mapping('labels', parse_labels)
    return dict(md)
+
+
def merge_blkio_config(base, override):
    """Merge `blkio_config`; per-device limit lists are merged keyed on
    the device path (override wins), sorted by path.
    """
    md = MergeDict(base, override)
    md.merge_scalar('weight')

    def merge_limits_by_path(base_limits, override_limits):
        by_path = dict((limit['path'], limit) for limit in base_limits)
        by_path.update((limit['path'], limit) for limit in override_limits)
        return sorted(by_path.values(), key=lambda limit: limit['path'])

    for field in (
        "device_read_bps", "device_read_iops", "device_write_bps",
        "device_write_iops", "weight_device",
    ):
        md.merge_field(field, merge_limits_by_path, default=[])

    return dict(md)
+
+
def merge_logging(base, override):
    """Merge `logging` sections.

    Options merge key-wise only when both sides use the same driver (or
    the base declares none); a driver change discards the base options.
    """
    md = MergeDict(base, override)
    md.merge_scalar('driver')
    same_driver = md.get('driver') == base.get('driver') or base.get('driver') is None
    if same_driver:
        md.merge_mapping('options', lambda opts: opts or {})
    elif override.get('options'):
        md['options'] = override.get('options', {})
    return dict(md)
+
+
def legacy_v1_merge_image_or_build(output, base, override):
    """v1 merge of the mutually exclusive `image`/`build` keys.

    Preference order: override image > override build > base image >
    base build; whichever wins removes the other key from `output`.
    """
    output.pop('image', None)
    output.pop('build', None)
    for source in (override, base):
        if 'image' in source:
            output['image'] = source['image']
            return
        if 'build' in source:
            output['build'] = source['build']
            return
+
+
def merge_environment(base, override):
    """Merge two environment specs (list or dict form); override wins."""
    merged = parse_environment(base)
    merged.update(parse_environment(override))
    return merged
+
+
def split_kv(kvpair):
    """Split 'KEY=VAL' into its two parts; a bare key maps to ''."""
    key, sep, value = kvpair.partition('=')
    if sep:
        return [key, value]
    return kvpair, ''
+
+
def parse_dict_or_list(split_func, type_name, arguments):
    """Normalize a field given either as a mapping or as a list of
    entries (each converted to a key/value pair by `split_func`).

    :param type_name: field name used in the error message
    :raises ConfigurationError: if `arguments` is neither
    """
    if not arguments:
        return {}

    if isinstance(arguments, dict):
        return dict(arguments)

    if isinstance(arguments, list):
        return dict(split_func(entry) for entry in arguments)

    raise ConfigurationError(
        "%s \"%s\" must be a list or mapping," %
        (type_name, arguments)
    )
+
+
# Concrete parsers built on parse_dict_or_list: each normalizes a field
# that may be written either as a list of "KEY=VAL" strings or as a
# mapping into a plain dict. The string argument names the field for
# error messages.
parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
parse_labels = functools.partial(parse_dict_or_list, split_kv, 'labels')
# List-form networks carry no per-network options, hence the None values.
parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
# List-form depends_on entries default to the service_started condition.
parse_depends_on = functools.partial(
    parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
)
parse_deploy = functools.partial(parse_dict_or_list, split_kv, 'deploy')
+
+
def parse_flat_dict(d):
    """Return a shallow copy of a mapping; falsy input yields {}.

    :raises ConfigurationError: for truthy non-mapping input
    """
    if isinstance(d, dict):
        return dict(d)

    if not d:
        return {}

    raise ConfigurationError("Invalid type: expected mapping")
+
+
def resolve_env_var(key, val, environment):
    """Return a (key, value) pair, falling back to `environment` when the
    config provides no value (None); unknown keys resolve to None.
    """
    if val is not None:
        return key, val
    if environment and key in environment:
        return key, environment[key]
    return key, None
+
+
def resolve_volume_paths(working_dir, service_dict):
    """Resolve each of the service's volume entries against working_dir."""
    return [
        resolve_volume_path(working_dir, volume_spec)
        for volume_spec in service_dict['volumes']
    ]
+
+
def resolve_volume_path(working_dir, volume):
    """Convert a volume definition (dict or string form) into the
    canonical "host:container[:mode]" string, expanding relative and
    `~` host paths.
    """
    mount_params = None
    if isinstance(volume, dict):
        # Long syntax: explicit target/source keys.
        container_path = volume.get('target')
        host_path = volume.get('source')
        mode = None
        if host_path:
            if volume.get('read_only'):
                mode = 'ro'
            # nocopy takes precedence over read_only when both are set.
            if volume.get('volume', {}).get('nocopy'):
                mode = 'nocopy'
            mount_params = (host_path, mode)
    else:
        container_path, mount_params = split_path_mapping(volume)

    if mount_params is not None:
        host_path, mode = mount_params
        # A container-only mapping stays as-is.
        if host_path is None:
            return container_path
        # Only '.'-relative paths resolve against the working dir; other
        # host values (absolute paths, named volumes) are kept.
        if host_path.startswith('.'):
            host_path = expand_path(working_dir, host_path)
        host_path = os.path.expanduser(host_path)
        return u"{}:{}{}".format(host_path, container_path, (':' + mode if mode else ''))

    return container_path
+
+
def normalize_build(service_dict, working_dir, environment):
    """Normalize `build` to its dict form and resolve build args in place."""
    if 'build' not in service_dict:
        return

    raw_build = service_dict['build']
    if isinstance(raw_build, six.string_types):
        # String shorthand: the value is the build context.
        normalized = {'context': raw_build}
    else:
        normalized = dict(raw_build)
        if 'args' in normalized:
            normalized['args'] = build_string_dict(
                resolve_build_args(normalized.get('args'), environment)
            )

    service_dict['build'] = normalized
+
+
def resolve_build_path(working_dir, build_path):
    """Leave remote URL contexts untouched; expand local paths to absolute."""
    return build_path if is_url(build_path) else expand_path(working_dir, build_path)
+
+
def is_url(build_path):
    # True when the build context is a remote reference rather than a
    # local path; recognized prefixes come from DOCKER_VALID_URL_PREFIXES.
    return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
+
+
def validate_paths(service_dict):
    """Check that a local build context exists and is readable.

    :raises ConfigurationError: when the context path is neither a URL
        nor a readable local path
    """
    if 'build' not in service_dict:
        return

    build = service_dict.get('build', {})
    if isinstance(build, six.string_types):
        build_path = build
    elif isinstance(build, dict) and 'context' in build:
        build_path = build['context']
    else:
        # A build section without a context has nothing to validate.
        return

    if is_url(build_path):
        return

    if not os.path.exists(build_path) or not os.access(build_path, os.R_OK):
        raise ConfigurationError(
            "build path %s either does not exist, is not accessible, "
            "or is not a valid URL." % build_path)
+
+
def merge_path_mappings(base, override):
    """Merge volume/device mappings keyed on container path; override wins."""
    merged = dict_from_path_mappings(base)
    merged.update(dict_from_path_mappings(override))
    return path_mappings_from_dict(merged)
+
+
def dict_from_path_mappings(path_mappings):
    """Index path mappings by container path (see split_path_mapping)."""
    if not path_mappings:
        return {}
    return dict(split_path_mapping(mapping) for mapping in path_mappings)
+
+
def path_mappings_from_dict(d):
    # Convert {container_path: host_spec} back into the list form used in
    # service dicts, sorted for deterministic output.
    return [join_path_mapping(v) for v in sorted(d.items())]
+
+
def split_path_mapping(volume_path):
    """Split a volume string into (container_path, (host_path, mode)).

    Returns (container_path, None) when no host path is present, and
    (target, original_dict) for the dict (long-syntax) form. splitdrive
    keeps Windows drive letters from being mistaken for the ':' separator.
    """
    if isinstance(volume_path, dict):
        return (volume_path.get('target'), volume_path)

    drive, remainder = splitdrive(volume_path)
    if ':' not in remainder:
        return (volume_path, None)

    host, container = remainder.split(':', 1)
    container_drive, container_path = splitdrive(container)
    mode = None
    if ':' in container_path:
        container_path, mode = container_path.rsplit(':', 1)
    return (container_drive + container_path, (drive + host, mode))
+
+
def join_path_mapping(pair):
    """Inverse of split_path_mapping: rebuild a volume entry from a
    (container, host_spec) pair.
    """
    container, host = pair
    if isinstance(host, dict):
        # Long-syntax volumes are kept as dicts.
        return host
    if host is None:
        return container
    host_path, mode = host
    joined = host_path + ":" + container
    return joined + ":" + mode if mode else joined
+
+
def expand_path(working_dir, path):
    """Make `path` absolute relative to working_dir, expanding `~` first."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(os.path.join(working_dir, expanded))
+
+
def merge_list_or_string(base, override):
    # Fields that accept either a scalar or a list merge by concatenation,
    # base entries first.
    return to_list(base) + to_list(override)
+
+
def to_list(value):
    """Wrap a scalar string in a list; None becomes []; other values
    (already lists) pass through unchanged.
    """
    if value is None:
        return []
    if isinstance(value, six.string_types):
        return [value]
    return value
+
+
def to_mapping(sequence, key_field):
    """Index items of `sequence` by the value of their `key_field` attribute."""
    mapping = {}
    for item in sequence:
        mapping[getattr(item, key_field)] = item
    return mapping
+
+
def has_uppercase(name):
    """True if `name` contains any ASCII uppercase letter."""
    return not set(name).isdisjoint(string.ascii_uppercase)
+
+
def load_yaml(filename):
    """Read and parse a YAML file, converting read and parse errors into
    ConfigurationError with the originating error class in the message.
    """
    try:
        with open(filename, 'r') as handle:
            return yaml.safe_load(handle)
    except (IOError, yaml.YAMLError) as exc:
        error_name = '{}.{}'.format(
            getattr(exc, '__module__', ''), exc.__class__.__name__)
        raise ConfigurationError(u"{}: {}".format(error_name, exc))
diff --git a/compose/config/config_schema_v1.json b/compose/config/config_schema_v1.json
new file mode 100644
index 00000000..94354cda
--- /dev/null
+++ b/compose/config/config_schema_v1.json
@@ -0,0 +1,188 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v1.json",
+
+ "type": "object",
+
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "build": {"type": "string"},
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "dockerfile": {"type": "string"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "log_driver": {"type": "string"},
+ "log_opt": {"type": "object"},
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "memswap_limit": {"type": ["number", "string"]},
+ "mem_swappiness": {"type": "integer"},
+ "net": {"type": "string"},
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_signal": {"type": "string"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {
+ "required": ["build"],
+ "not": {"required": ["image"]}
+ },
+ {
+ "required": ["image"],
+ "not": {"anyOf": [
+ {"required": ["build"]},
+ {"required": ["dockerfile"]}
+ ]}
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v2.0.json b/compose/config/config_schema_v2.0.json
new file mode 100644
index 00000000..2ad62ac5
--- /dev/null
+++ b/compose/config/config_schema_v2.0.json
@@ -0,0 +1,389 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.0.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v2.1.json b/compose/config/config_schema_v2.1.json
new file mode 100644
index 00000000..24e6ba02
--- /dev/null
+++ b/compose/config/config_schema_v2.1.json
@@ -0,0 +1,441 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.1.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+                "type": "object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v2.2.json b/compose/config/config_schema_v2.2.json
new file mode 100644
index 00000000..86fc5df9
--- /dev/null
+++ b/compose/config/config_schema_v2.2.json
@@ -0,0 +1,448 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.2.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+                "type": "object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v2.3.json b/compose/config/config_schema_v2.3.json
new file mode 100644
index 00000000..ceaf4495
--- /dev/null
+++ b/compose/config/config_schema_v2.3.json
@@ -0,0 +1,451 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.3.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+                "type": "object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "start_period": {"type": "string"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.0.json b/compose/config/config_schema_v3.0.json
new file mode 100644
index 00000000..f39344cf
--- /dev/null
+++ b/compose/config/config_schema_v3.0.json
@@ -0,0 +1,384 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.0.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.1.json b/compose/config/config_schema_v3.1.json
new file mode 100644
index 00000000..719c0fa7
--- /dev/null
+++ b/compose/config/config_schema_v3.1.json
@@ -0,0 +1,429 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.1.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.2.json b/compose/config/config_schema_v3.2.json
new file mode 100644
index 00000000..2ca8e92d
--- /dev/null
+++ b/compose/config/config_schema_v3.2.json
@@ -0,0 +1,476 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.2.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.3.json b/compose/config/config_schema_v3.3.json
new file mode 100644
index 00000000..f1eb9a66
--- /dev/null
+++ b/compose/config/config_schema_v3.3.json
@@ -0,0 +1,535 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.3.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.4.json b/compose/config/config_schema_v3.4.json
new file mode 100644
index 00000000..dae7d7d2
--- /dev/null
+++ b/compose/config/config_schema_v3.4.json
@@ -0,0 +1,544 @@
+
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.4.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string", "format": "duration"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.5.json b/compose/config/config_schema_v3.5.json
new file mode 100644
index 00000000..fa95d6a2
--- /dev/null
+++ b/compose/config/config_schema_v3.5.json
@@ -0,0 +1,542 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.5.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/environment.py b/compose/config/environment.py
new file mode 100644
index 00000000..4ba228c8
--- /dev/null
+++ b/compose/config/environment.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import codecs
+import contextlib
+import logging
+import os
+
+import six
+
+from ..const import IS_WINDOWS_PLATFORM
+from .errors import ConfigurationError
+
+log = logging.getLogger(__name__)
+
+
+def split_env(env):
+ if isinstance(env, six.binary_type):
+ env = env.decode('utf-8', 'replace')
+ if '=' in env:
+ return env.split('=', 1)
+ else:
+ return env, None
+
+
+def env_vars_from_file(filename):
+ """
+ Read in a line delimited file of environment variables.
+ """
+ if not os.path.exists(filename):
+ raise ConfigurationError("Couldn't find env file: %s" % filename)
+ elif not os.path.isfile(filename):
+ raise ConfigurationError("%s is not a file." % (filename))
+ env = {}
+ with contextlib.closing(codecs.open(filename, 'r', 'utf-8')) as fileobj:
+ for line in fileobj:
+ line = line.strip()
+ if line and not line.startswith('#'):
+ k, v = split_env(line)
+ env[k] = v
+ return env
+
+
+class Environment(dict):
+ def __init__(self, *args, **kwargs):
+ super(Environment, self).__init__(*args, **kwargs)
+ self.missing_keys = []
+
+ @classmethod
+ def from_env_file(cls, base_dir):
+ def _initialize():
+ result = cls()
+ if base_dir is None:
+ return result
+ env_file_path = os.path.join(base_dir, '.env')
+ try:
+ return cls(env_vars_from_file(env_file_path))
+ except ConfigurationError:
+ pass
+ return result
+ instance = _initialize()
+ instance.update(os.environ)
+ return instance
+
+ @classmethod
+ def from_command_line(cls, parsed_env_opts):
+ result = cls()
+ for k, v in parsed_env_opts.items():
+ # Values from the command line take priority, unless they're unset
+ # in which case they take the value from the system's environment
+ if v is None and k in os.environ:
+ result[k] = os.environ[k]
+ else:
+ result[k] = v
+ return result
+
+ def __getitem__(self, key):
+ try:
+ return super(Environment, self).__getitem__(key)
+ except KeyError:
+ if IS_WINDOWS_PLATFORM:
+ try:
+ return super(Environment, self).__getitem__(key.upper())
+ except KeyError:
+ pass
+ if key not in self.missing_keys:
+ log.warn(
+ "The {} variable is not set. Defaulting to a blank string."
+ .format(key)
+ )
+ self.missing_keys.append(key)
+
+ return ""
+
+ def __contains__(self, key):
+ result = super(Environment, self).__contains__(key)
+ if IS_WINDOWS_PLATFORM:
+ return (
+ result or super(Environment, self).__contains__(key.upper())
+ )
+ return result
+
+ def get(self, key, *args, **kwargs):
+ if IS_WINDOWS_PLATFORM:
+ return super(Environment, self).get(
+ key,
+ super(Environment, self).get(key.upper(), *args, **kwargs)
+ )
+ return super(Environment, self).get(key, *args, **kwargs)
+
+ def get_boolean(self, key):
+ # Convert a value to a boolean using "common sense" rules.
+ # Unset, empty, "0" and "false" (i-case) yield False.
+ # All other values yield True.
+ value = self.get(key)
+ if not value:
+ return False
+ if value.lower() in ['0', 'false']:
+ return False
+ return True
diff --git a/compose/config/errors.py b/compose/config/errors.py
new file mode 100644
index 00000000..f5c03808
--- /dev/null
+++ b/compose/config/errors.py
@@ -0,0 +1,55 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+
+VERSION_EXPLANATION = (
+ 'You might be seeing this error because you\'re using the wrong Compose file version. '
+ 'Either specify a supported version (e.g "2.2" or "3.3") and place '
+ 'your service definitions under the `services` key, or omit the `version` key '
+ 'and place your service definitions at the root of the file to use '
+ 'version 1.\nFor more on the Compose file format versions, see '
+ 'https://docs.docker.com/compose/compose-file/')
+
+
+class ConfigurationError(Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class DependencyError(ConfigurationError):
+ pass
+
+
+class CircularReference(ConfigurationError):
+ def __init__(self, trail):
+ self.trail = trail
+
+ @property
+ def msg(self):
+ lines = [
+ "{} in {}".format(service_name, filename)
+ for (filename, service_name) in self.trail
+ ]
+ return "Circular reference:\n {}".format("\n extends ".join(lines))
+
+
+class ComposeFileNotFound(ConfigurationError):
+ def __init__(self, supported_filenames):
+ super(ComposeFileNotFound, self).__init__("""
+ Can't find a suitable configuration file in this directory or any
+ parent. Are you in the right directory?
+
+ Supported filenames: %s
+ """ % ", ".join(supported_filenames))
+
+
+class DuplicateOverrideFileFound(ConfigurationError):
+ def __init__(self, override_filenames):
+ self.override_filenames = override_filenames
+ super(DuplicateOverrideFileFound, self).__init__(
+ "Multiple override files found: {}. You may only use a single "
+ "override file.".format(", ".join(override_filenames))
+ )
diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py
new file mode 100644
index 00000000..b13ac591
--- /dev/null
+++ b/compose/config/interpolation.py
@@ -0,0 +1,102 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+from string import Template
+
+import six
+
+from .errors import ConfigurationError
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
+log = logging.getLogger(__name__)
+
+
+class Interpolator(object):
+
+ def __init__(self, templater, mapping):
+ self.templater = templater
+ self.mapping = mapping
+
+ def interpolate(self, string):
+ try:
+ return self.templater(string).substitute(self.mapping)
+ except ValueError:
+ raise InvalidInterpolation(string)
+
+
+def interpolate_environment_variables(version, config, section, environment):
+ if version <= V2_0:
+ interpolator = Interpolator(Template, environment)
+ else:
+ interpolator = Interpolator(TemplateWithDefaults, environment)
+
+ def process_item(name, config_dict):
+ return dict(
+ (key, interpolate_value(name, key, val, section, interpolator))
+ for key, val in (config_dict or {}).items()
+ )
+
+ return dict(
+ (name, process_item(name, config_dict or {}))
+ for name, config_dict in config.items()
+ )
+
+
+def interpolate_value(name, config_key, value, section, interpolator):
+ try:
+ return recursive_interpolate(value, interpolator)
+ except InvalidInterpolation as e:
+ raise ConfigurationError(
+ 'Invalid interpolation format for "{config_key}" option '
+ 'in {section} "{name}": "{string}"'.format(
+ config_key=config_key,
+ name=name,
+ section=section,
+ string=e.string))
+
+
+def recursive_interpolate(obj, interpolator):
+ if isinstance(obj, six.string_types):
+ return interpolator.interpolate(obj)
+ if isinstance(obj, dict):
+ return dict(
+ (key, recursive_interpolate(val, interpolator))
+ for (key, val) in obj.items()
+ )
+ if isinstance(obj, list):
+ return [recursive_interpolate(val, interpolator) for val in obj]
+ return obj
+
+
+class TemplateWithDefaults(Template):
+ idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]+)?'
+
+ # Modified from python2.7/string.py
+ def substitute(self, mapping):
+ # Helper function for .sub()
+ def convert(mo):
+ # Check the most common path first.
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ if ':-' in named:
+ var, _, default = named.partition(':-')
+ return mapping.get(var) or default
+ if '-' in named:
+ var, _, default = named.partition('-')
+ return mapping.get(var, default)
+ val = mapping[named]
+ return '%s' % (val,)
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ self._invalid(mo)
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+
+class InvalidInterpolation(Exception):
+ def __init__(self, string):
+ self.string = string
diff --git a/compose/config/serialize.py b/compose/config/serialize.py
new file mode 100644
index 00000000..2b8c73f1
--- /dev/null
+++ b/compose/config/serialize.py
@@ -0,0 +1,145 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import six
+import yaml
+
+from compose.config import types
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_4 as V3_4
+
+
+def serialize_config_type(dumper, data):
+ representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
+ return representer(data.repr())
+
+
+def serialize_dict_type(dumper, data):
+ return dumper.represent_dict(data.repr())
+
+
+def serialize_string(dumper, data):
+ """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
+
+ data = data.replace('$', '$$')
+
+ if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
+ # Empirically only y/n appears to be an issue, but this might change
+ # depending on which PyYaml version is being used. Err on safe side.
+ return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
+ return representer(data)
+
+
+yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
+yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
+yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
+yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
+yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
+yaml.SafeDumper.add_representer(str, serialize_string)
+yaml.SafeDumper.add_representer(six.text_type, serialize_string)
+
+
+def denormalize_config(config, image_digests=None):
+ result = {'version': str(V2_1) if config.version == V1 else str(config.version)}
+ denormalized_services = [
+ denormalize_service_dict(
+ service_dict,
+ config.version,
+ image_digests[service_dict['name']] if image_digests else None)
+ for service_dict in config.services
+ ]
+ result['services'] = {
+ service_dict.pop('name'): service_dict
+ for service_dict in denormalized_services
+ }
+
+ for key in ('networks', 'volumes', 'secrets', 'configs'):
+ config_dict = getattr(config, key)
+ if not config_dict:
+ continue
+ result[key] = config_dict.copy()
+ for name, conf in result[key].items():
+ if 'external_name' in conf:
+ del conf['external_name']
+
+ if 'name' in conf:
+ if config.version < V2_1 or (config.version >= V3_0 and config.version < V3_4):
+ del conf['name']
+ elif 'external' in conf:
+ conf['external'] = True
+
+ return result
+
+
+def serialize_config(config, image_digests=None):
+ return yaml.safe_dump(
+ denormalize_config(config, image_digests),
+ default_flow_style=False,
+ indent=2,
+ width=80
+ )
+
+
+def serialize_ns_time_value(value):
+ result = (value, 'ns')
+ table = [
+ (1000., 'us'),
+ (1000., 'ms'),
+ (1000., 's'),
+ (60., 'm'),
+ (60., 'h')
+ ]
+ for stage in table:
+ tmp = value / stage[0]
+ if tmp == int(value / stage[0]):
+ value = tmp
+ result = (int(value), stage[1])
+ else:
+ break
+ return '{0}{1}'.format(*result)
+
+
+def denormalize_service_dict(service_dict, version, image_digest=None):
+ service_dict = service_dict.copy()
+
+ if image_digest:
+ service_dict['image'] = image_digest
+
+ if 'restart' in service_dict:
+ service_dict['restart'] = types.serialize_restart_spec(
+ service_dict['restart']
+ )
+
+ if version == V1 and 'network_mode' not in service_dict:
+ service_dict['network_mode'] = 'bridge'
+
+ if 'depends_on' in service_dict and (version < V2_1 or version >= V3_0):
+ service_dict['depends_on'] = sorted([
+ svc for svc in service_dict['depends_on'].keys()
+ ])
+
+ if 'healthcheck' in service_dict:
+ if 'interval' in service_dict['healthcheck']:
+ service_dict['healthcheck']['interval'] = serialize_ns_time_value(
+ service_dict['healthcheck']['interval']
+ )
+ if 'timeout' in service_dict['healthcheck']:
+ service_dict['healthcheck']['timeout'] = serialize_ns_time_value(
+ service_dict['healthcheck']['timeout']
+ )
+
+ if 'start_period' in service_dict['healthcheck']:
+ service_dict['healthcheck']['start_period'] = serialize_ns_time_value(
+ service_dict['healthcheck']['start_period']
+ )
+ if 'ports' in service_dict and version < V3_2:
+ service_dict['ports'] = [
+ p.legacy_repr() if isinstance(p, types.ServicePort) else p
+ for p in service_dict['ports']
+ ]
+
+ return service_dict
diff --git a/compose/config/sort_services.py b/compose/config/sort_services.py
new file mode 100644
index 00000000..42f548a6
--- /dev/null
+++ b/compose/config/sort_services.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose.config.errors import DependencyError
+
+
+def get_service_name_from_network_mode(network_mode):
+ return get_source_name_from_network_mode(network_mode, 'service')
+
+
+def get_container_name_from_network_mode(network_mode):
+ return get_source_name_from_network_mode(network_mode, 'container')
+
+
+def get_source_name_from_network_mode(network_mode, source_type):
+ if not network_mode:
+ return
+
+ if not network_mode.startswith(source_type+':'):
+ return
+
+ _, net_name = network_mode.split(':', 1)
+ return net_name
+
+
+def get_service_names(links):
+ return [link.split(':')[0] for link in links]
+
+
+def get_service_names_from_volumes_from(volumes_from):
+ return [volume_from.source for volume_from in volumes_from]
+
+
+def get_service_dependents(service_dict, services):
+ name = service_dict['name']
+ return [
+ service for service in services
+ if (name in get_service_names(service.get('links', [])) or
+ name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
+ name == get_service_name_from_network_mode(service.get('network_mode')) or
+ name == get_service_name_from_network_mode(service.get('pid')) or
+ name in service.get('depends_on', []))
+ ]
+
+
+def sort_service_dicts(services):
+ # Topological sort (Cormen/Tarjan algorithm).
+ unmarked = services[:]
+ temporary_marked = set()
+ sorted_services = []
+
+ def visit(n):
+ if n['name'] in temporary_marked:
+ if n['name'] in get_service_names(n.get('links', [])):
+ raise DependencyError('A service can not link to itself: %s' % n['name'])
+ if n['name'] in n.get('volumes_from', []):
+ raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
+ if n['name'] in n.get('depends_on', []):
+ raise DependencyError('A service can not depend on itself: %s' % n['name'])
+ raise DependencyError('Circular dependency between %s' % ' and '.join(temporary_marked))
+
+ if n in unmarked:
+ temporary_marked.add(n['name'])
+ for m in get_service_dependents(n, services):
+ visit(m)
+ temporary_marked.remove(n['name'])
+ unmarked.remove(n)
+ sorted_services.insert(0, n)
+
+ while unmarked:
+ visit(unmarked[-1])
+
+ return sorted_services
diff --git a/compose/config/types.py b/compose/config/types.py
new file mode 100644
index 00000000..c410343b
--- /dev/null
+++ b/compose/config/types.py
@@ -0,0 +1,351 @@
+"""
+Types for objects parsed from the configuration.
+"""
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import re
+from collections import namedtuple
+
+import six
+from docker.utils.ports import build_port_bindings
+
+from ..const import COMPOSEFILE_V1 as V1
+from .errors import ConfigurationError
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.utils import splitdrive
+
+win32_root_path_pattern = re.compile(r'^[A-Za-z]\:\\.*')
+
+
+class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
+
+ # TODO: drop service_names arg when v1 is removed
+ @classmethod
+ def parse(cls, volume_from_config, service_names, version):
+ func = cls.parse_v1 if version == V1 else cls.parse_v2
+ return func(service_names, volume_from_config)
+
+ @classmethod
+ def parse_v1(cls, service_names, volume_from_config):
+ parts = volume_from_config.split(':')
+ if len(parts) > 2:
+ raise ConfigurationError(
+ "volume_from {} has incorrect format, should be "
+ "service[:mode]".format(volume_from_config))
+
+ if len(parts) == 1:
+ source = parts[0]
+ mode = 'rw'
+ else:
+ source, mode = parts
+
+ type = 'service' if source in service_names else 'container'
+ return cls(source, mode, type)
+
+ @classmethod
+ def parse_v2(cls, service_names, volume_from_config):
+ parts = volume_from_config.split(':')
+ if len(parts) > 3:
+ raise ConfigurationError(
+ "volume_from {} has incorrect format, should be one of "
+ "'<service name>[:<mode>]' or "
+ "'container:<container name>[:<mode>]'".format(volume_from_config))
+
+ if len(parts) == 1:
+ source = parts[0]
+ return cls(source, 'rw', 'service')
+
+ if len(parts) == 2:
+ if parts[0] == 'container':
+ type, source = parts
+ return cls(source, 'rw', type)
+
+ source, mode = parts
+ return cls(source, mode, 'service')
+
+ if len(parts) == 3:
+ type, source, mode = parts
+ if type not in ('service', 'container'):
+ raise ConfigurationError(
+ "Unknown volumes_from type '{}' in '{}'".format(
+ type,
+ volume_from_config))
+
+ return cls(source, mode, type)
+
+ def repr(self):
+ return '{v.type}:{v.source}:{v.mode}'.format(v=self)
+
+
+def parse_restart_spec(restart_config):
+ if not restart_config:
+ return None
+ parts = restart_config.split(':')
+ if len(parts) > 2:
+ raise ConfigurationError(
+ "Restart %s has incorrect format, should be "
+ "mode[:max_retry]" % restart_config)
+ if len(parts) == 2:
+ name, max_retry_count = parts
+ else:
+ name, = parts
+ max_retry_count = 0
+
+ return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
+
+
+def serialize_restart_spec(restart_spec):
+ if not restart_spec:
+ return ''
+ parts = [restart_spec['Name']]
+ if restart_spec['MaximumRetryCount']:
+ parts.append(six.text_type(restart_spec['MaximumRetryCount']))
+ return ':'.join(parts)
+
+
+def parse_extra_hosts(extra_hosts_config):
+ if not extra_hosts_config:
+ return {}
+
+ if isinstance(extra_hosts_config, dict):
+ return dict(extra_hosts_config)
+
+ if isinstance(extra_hosts_config, list):
+ extra_hosts_dict = {}
+ for extra_hosts_line in extra_hosts_config:
+ # TODO: validate string contains ':' ?
+ host, ip = extra_hosts_line.split(':', 1)
+ extra_hosts_dict[host.strip()] = ip.strip()
+ return extra_hosts_dict
+
+
+def normalize_path_for_engine(path):
+ """Windows paths, c:\my\path\shiny, need to be changed to be compatible with
+ the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
+ """
+ drive, tail = splitdrive(path)
+
+ if drive:
+ path = '/' + drive.lower().rstrip(':') + tail
+
+ return path.replace('\\', '/')
+
+
+class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
+
+ @classmethod
+ def _parse_unix(cls, volume_config):
+ parts = volume_config.split(':')
+
+ if len(parts) > 3:
+ raise ConfigurationError(
+ "Volume %s has incorrect format, should be "
+ "external:internal[:mode]" % volume_config)
+
+ if len(parts) == 1:
+ external = None
+ internal = os.path.normpath(parts[0])
+ else:
+ external = os.path.normpath(parts[0])
+ internal = os.path.normpath(parts[1])
+
+ mode = 'rw'
+ if len(parts) == 3:
+ mode = parts[2]
+
+ return cls(external, internal, mode)
+
+ @classmethod
+ def _parse_win32(cls, volume_config, normalize):
+ # relative paths in windows expand to include the drive, eg C:\
+ # so we join the first 2 parts back together to count as one
+ mode = 'rw'
+
+ def separate_next_section(volume_config):
+ drive, tail = splitdrive(volume_config)
+ parts = tail.split(':', 1)
+ if drive:
+ parts[0] = drive + parts[0]
+ return parts
+
+ parts = separate_next_section(volume_config)
+ if len(parts) == 1:
+ internal = parts[0]
+ external = None
+ else:
+ external = parts[0]
+ parts = separate_next_section(parts[1])
+ external = os.path.normpath(external)
+ internal = parts[0]
+ if len(parts) > 1:
+ if ':' in parts[1]:
+ raise ConfigurationError(
+ "Volume %s has incorrect format, should be "
+ "external:internal[:mode]" % volume_config
+ )
+ mode = parts[1]
+
+ if normalize:
+ external = normalize_path_for_engine(external) if external else None
+
+ return cls(external, internal, mode)
+
+ @classmethod
+ def parse(cls, volume_config, normalize=False):
+ """Parse a volume_config path and split it into external:internal[:mode]
+ parts to be returned as a valid VolumeSpec.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return cls._parse_win32(volume_config, normalize)
+ else:
+ return cls._parse_unix(volume_config)
+
+ def repr(self):
+ external = self.external + ':' if self.external else ''
+ mode = ':' + self.mode if self.external else ''
+ return '{ext}{v.internal}{mode}'.format(mode=mode, ext=external, v=self)
+
+ @property
+ def is_named_volume(self):
+ res = self.external and not self.external.startswith(('.', '/', '~'))
+ if not IS_WINDOWS_PLATFORM:
+ return res
+
+ return (
+ res and not self.external.startswith('\\') and
+ not win32_root_path_pattern.match(self.external)
+ )
+
+
+class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
+
+ @classmethod
+ def parse(cls, link_spec):
+ target, _, alias = link_spec.partition(':')
+ if not alias:
+ alias = target
+ return cls(target, alias)
+
+ def repr(self):
+ if self.target == self.alias:
+ return self.target
+ return '{s.target}:{s.alias}'.format(s=self)
+
+ @property
+ def merge_field(self):
+ return self.alias
+
+
+class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode')):
+ @classmethod
+ def parse(cls, spec):
+ if isinstance(spec, six.string_types):
+ return cls(spec, None, None, None, None)
+ return cls(
+ spec.get('source'),
+ spec.get('target'),
+ spec.get('uid'),
+ spec.get('gid'),
+ spec.get('mode'),
+ )
+
+ @property
+ def merge_field(self):
+ return self.source
+
+ def repr(self):
+ return dict(
+ [(k, v) for k, v in zip(self._fields, self) if v is not None]
+ )
+
+
+class ServiceSecret(ServiceConfigBase):
+ pass
+
+
+class ServiceConfig(ServiceConfigBase):
+ pass
+
+
+class ServicePort(namedtuple('_ServicePort', 'target published protocol mode external_ip')):
+ def __new__(cls, target, published, *args, **kwargs):
+ try:
+ if target:
+ target = int(target)
+ except ValueError:
+ raise ConfigurationError('Invalid target port: {}'.format(target))
+
+ try:
+ if published:
+ published = int(published)
+ except ValueError:
+ raise ConfigurationError('Invalid published port: {}'.format(published))
+
+ return super(ServicePort, cls).__new__(
+ cls, target, published, *args, **kwargs
+ )
+
+ @classmethod
+ def parse(cls, spec):
+ if isinstance(spec, cls):
+ # When extending a service with ports, the port definitions have already been parsed
+ return [spec]
+
+ if not isinstance(spec, dict):
+ result = []
+ try:
+ for k, v in build_port_bindings([spec]).items():
+ if '/' in k:
+ target, proto = k.split('/', 1)
+ else:
+ target, proto = (k, None)
+ for pub in v:
+ if pub is None:
+ result.append(
+ cls(target, None, proto, None, None)
+ )
+ elif isinstance(pub, tuple):
+ result.append(
+ cls(target, pub[1], proto, None, pub[0])
+ )
+ else:
+ result.append(
+ cls(target, pub, proto, None, None)
+ )
+ except ValueError as e:
+ raise ConfigurationError(str(e))
+
+ return result
+
+ return [cls(
+ spec.get('target'),
+ spec.get('published'),
+ spec.get('protocol'),
+ spec.get('mode'),
+ None
+ )]
+
+ @property
+ def merge_field(self):
+ return (self.target, self.published, self.external_ip, self.protocol)
+
+ def repr(self):
+ return dict(
+ [(k, v) for k, v in zip(self._fields, self) if v is not None]
+ )
+
+ def legacy_repr(self):
+ return normalize_port_dict(self.repr())
+
+
+def normalize_port_dict(port):
+ return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
+ published=port.get('published', ''),
+ is_pub=(':' if port.get('published') is not None or port.get('external_ip') else ''),
+ target=port.get('target'),
+ protocol=port.get('protocol', 'tcp'),
+ external_ip=port.get('external_ip', ''),
+ has_ext_ip=(':' if port.get('external_ip') else ''),
+ )
diff --git a/compose/config/validation.py b/compose/config/validation.py
new file mode 100644
index 00000000..940775a2
--- /dev/null
+++ b/compose/config/validation.py
@@ -0,0 +1,467 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import logging
+import os
+import re
+import sys
+
+import six
+from docker.utils.ports import split_port
+from jsonschema import Draft4Validator
+from jsonschema import FormatChecker
+from jsonschema import RefResolver
+from jsonschema import ValidationError
+
+from ..const import COMPOSEFILE_V1 as V1
+from ..const import NANOCPUS_SCALE
+from .errors import ConfigurationError
+from .errors import VERSION_EXPLANATION
+from .sort_services import get_service_name_from_network_mode
+
+
+log = logging.getLogger(__name__)
+
+
+DOCKER_CONFIG_HINTS = {
+ 'cpu_share': 'cpu_shares',
+ 'add_host': 'extra_hosts',
+ 'hosts': 'extra_hosts',
+ 'extra_host': 'extra_hosts',
+ 'device': 'devices',
+ 'link': 'links',
+ 'memory_swap': 'memswap_limit',
+ 'port': 'ports',
+ 'privilege': 'privileged',
+ 'priviliged': 'privileged',
+ 'privilige': 'privileged',
+ 'volume': 'volumes',
+ 'workdir': 'working_dir',
+}
+
+
+VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
+VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
+
+
+@FormatChecker.cls_checks(format="ports", raises=ValidationError)
+def format_ports(instance):
+ try:
+ split_port(instance)
+ except ValueError as e:
+ raise ValidationError(six.text_type(e))
+ return True
+
+
+@FormatChecker.cls_checks(format="expose", raises=ValidationError)
+def format_expose(instance):
+ if isinstance(instance, six.string_types):
+ if not re.match(VALID_EXPOSE_FORMAT, instance):
+ raise ValidationError(
+ "should be of the format 'PORT[/PROTOCOL]'")
+
+ return True
+
+
+def match_named_volumes(service_dict, project_volumes):
+ service_volumes = service_dict.get('volumes', [])
+ for volume_spec in service_volumes:
+ if volume_spec.is_named_volume and volume_spec.external not in project_volumes:
+ raise ConfigurationError(
+ 'Named volume "{0}" is used in service "{1}" but no'
+ ' declaration was found in the volumes section.'.format(
+ volume_spec.repr(), service_dict.get('name')
+ )
+ )
+
+
+def python_type_to_yaml_type(type_):
+ type_name = type(type_).__name__
+ return {
+ 'dict': 'mapping',
+ 'list': 'array',
+ 'int': 'number',
+ 'float': 'number',
+ 'bool': 'boolean',
+ 'unicode': 'string',
+ 'str': 'string',
+ 'bytes': 'string',
+ }.get(type_name, type_name)
+
+
+def validate_config_section(filename, config, section):
+ """Validate the structure of a configuration section. This must be done
+ before interpolation so it's separate from schema validation.
+ """
+ if not isinstance(config, dict):
+ raise ConfigurationError(
+ "In file '{filename}', {section} must be a mapping, not "
+ "{type}.".format(
+ filename=filename,
+ section=section,
+ type=anglicize_json_type(python_type_to_yaml_type(config))))
+
+ for key, value in config.items():
+ if not isinstance(key, six.string_types):
+ raise ConfigurationError(
+ "In file '{filename}', the {section} name {name} must be a "
+ "quoted string, i.e. '{name}'.".format(
+ filename=filename,
+ section=section,
+ name=key))
+
+ if not isinstance(value, (dict, type(None))):
+ raise ConfigurationError(
+ "In file '{filename}', {section} '{name}' must be a mapping not "
+ "{type}.".format(
+ filename=filename,
+ section=section,
+ name=key,
+ type=anglicize_json_type(python_type_to_yaml_type(value))))
+
+
+def validate_top_level_object(config_file):
+ if not isinstance(config_file.config, dict):
+ raise ConfigurationError(
+ "Top level object in '{}' needs to be an object not '{}'.".format(
+ config_file.filename,
+ type(config_file.config)))
+
+
+def validate_ulimits(service_config):
+ ulimit_config = service_config.config.get('ulimits', {})
+ for limit_name, soft_hard_values in six.iteritems(ulimit_config):
+ if isinstance(soft_hard_values, dict):
+ if not soft_hard_values['soft'] <= soft_hard_values['hard']:
+ raise ConfigurationError(
+ "Service '{s.name}' has invalid ulimit '{ulimit}'. "
+ "'soft' value can not be greater than 'hard' value ".format(
+ s=service_config,
+ ulimit=ulimit_config))
+
+
+def validate_extends_file_path(service_name, extends_options, filename):
+ """
+ The service to be extended must either be defined in the config key 'file',
+ or within 'filename'.
+ """
+ error_prefix = "Invalid 'extends' configuration for %s:" % service_name
+
+ if 'file' not in extends_options and filename is None:
+ raise ConfigurationError(
+ "%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
+ )
+
+
+def validate_network_mode(service_config, service_names):
+ network_mode = service_config.config.get('network_mode')
+ if not network_mode:
+ return
+
+ if 'networks' in service_config.config:
+ raise ConfigurationError("'network_mode' and 'networks' cannot be combined")
+
+ dependency = get_service_name_from_network_mode(network_mode)
+ if not dependency:
+ return
+
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' uses the network stack of service '{dep}' which "
+ "is undefined.".format(s=service_config, dep=dependency))
+
+
+def validate_pid_mode(service_config, service_names):
+ pid_mode = service_config.config.get('pid')
+ if not pid_mode:
+ return
+
+ dependency = get_service_name_from_network_mode(pid_mode)
+ if not dependency:
+ return
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' uses the PID namespace of service '{dep}' which "
+ "is undefined.".format(s=service_config, dep=dependency)
+ )
+
+
+def validate_links(service_config, service_names):
+ for link in service_config.config.get('links', []):
+ if link.split(':')[0] not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' has a link to service '{link}' which is "
+ "undefined.".format(s=service_config, link=link))
+
+
+def validate_depends_on(service_config, service_names):
+ deps = service_config.config.get('depends_on', {})
+ for dependency in deps.keys():
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' depends on service '{dep}' which is "
+ "undefined.".format(s=service_config, dep=dependency)
+ )
+
+
+def get_unsupported_config_msg(path, error_key):
+ msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
+ if error_key in DOCKER_CONFIG_HINTS:
+ msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key])
+ return msg
+
+
+def anglicize_json_type(json_type):
+ if json_type.startswith(('a', 'e', 'i', 'o', 'u')):
+ return 'an ' + json_type
+ return 'a ' + json_type
+
+
+def is_service_dict_schema(schema_id):
+ return schema_id in ('config_schema_v1.json', '#/properties/services')
+
+
+def handle_error_for_schema_with_id(error, path):
+ schema_id = error.schema['id']
+
+ if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties':
+ return "Invalid service name '{}' - only {} characters are allowed".format(
+ # The service_name is one of the keys in the json object
+ [i for i in list(error.instance) if not i or any(filter(
+ lambda c: not re.match(VALID_NAME_CHARS, c), i
+ ))][0],
+ VALID_NAME_CHARS
+ )
+
+ if error.validator == 'additionalProperties':
+ if schema_id == '#/definitions/service':
+ invalid_config_key = parse_key_from_error_msg(error)
+ return get_unsupported_config_msg(path, invalid_config_key)
+
+ if schema_id.startswith('config_schema_v'):
+ invalid_config_key = parse_key_from_error_msg(error)
+ return ('Invalid top-level property "{key}". Valid top-level '
+ 'sections for this Compose file are: {properties}, and '
+ 'extensions starting with "x-".\n\n{explanation}').format(
+ key=invalid_config_key,
+ properties=', '.join(error.schema['properties'].keys()),
+ explanation=VERSION_EXPLANATION
+ )
+
+ if not error.path:
+ return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
+
+
def handle_generic_error(error, path):
    """Fallback formatter for schema validation errors.

    :param error: jsonschema ValidationError
    :param path: list of path components; NOTE this is mutated (the
        offending config key may be appended) so the rendered path points
        at the exact field
    :return: a formatted message string
    """
    msg_format = None
    error_msg = error.message

    if error.validator == 'oneOf':
        msg_format = "{path} {msg}"
        config_key, error_msg = _parse_oneof_validator(error)
        if config_key:
            path.append(config_key)

    elif error.validator == 'type':
        msg_format = "{path} contains an invalid type, it should be {msg}"
        error_msg = _parse_valid_types_from_validator(error.validator_value)

    elif error.validator == 'required':
        error_msg = ", ".join(error.validator_value)
        msg_format = "{path} is invalid, {msg} is required."

    elif error.validator == 'dependencies':
        # validator_value maps a config key to the keys it requires.
        config_key = list(error.validator_value.keys())[0]
        required_keys = ",".join(error.validator_value[config_key])

        msg_format = "{path} is invalid: {msg}"
        path.append(config_key)
        error_msg = "when defining '{}' you must set '{}' as well".format(
            config_key,
            required_keys)

    elif error.cause:
        # A format checker raised; surface its exception text.
        error_msg = six.text_type(error.cause)
        msg_format = "{path} is invalid: {msg}"

    elif error.path:
        msg_format = "{path} value {msg}"

    if msg_format:
        return msg_format.format(path=path_string(path), msg=error_msg)

    return error.message
+
+
def parse_key_from_error_msg(error):
    """Extract the first single-quoted token from a validation message
    (jsonschema quotes the offending key, e.g. "('foo' was unexpected)")."""
    quoted_parts = error.message.split("'")
    return quoted_parts[1]
+
+
def path_string(path):
    """Render a schema path as dotted text, skipping non-string components
    (list indices)."""
    string_parts = [part for part in path if isinstance(part, six.string_types)]
    return ".".join(string_parts)
+
+
def _parse_valid_types_from_validator(validator):
    """Render a schema 'type' constraint as readable English.

    *validator* is either a single type name or a list of them; only the
    first and last entries receive an article, matching the established
    message format.
    """
    if not isinstance(validator, list):
        return anglicize_json_type(validator)

    if len(validator) == 1:
        return anglicize_json_type(validator[0])

    leading = [anglicize_json_type(validator[0])] + validator[1:-1]
    return "{}, or {}".format(
        ", ".join(leading), anglicize_json_type(validator[-1]))
+
+
def _parse_oneof_validator(error):
    """oneOf has multiple schemas, so we need to reason about which schema, sub
    schema or constraint the validation is failing on.
    Inspecting the context value of a ValidationError gives us information about
    which sub schema failed and which kind of error it is.

    :return: (config_key_or_None, message) tuple
    """
    types = []
    for context in error.context:
        if context.validator == 'oneOf':
            # Nested oneOf: recurse into the inner error.
            _, error_msg = _parse_oneof_validator(context)
            return path_string(context.path), error_msg

        if context.validator == 'required':
            return (None, context.message)

        if context.validator == 'additionalProperties':
            invalid_config_key = parse_key_from_error_msg(context)
            return (None, "contains unsupported option: '{}'".format(invalid_config_key))

        if context.validator == 'uniqueItems':
            return (
                path_string(context.path) if context.path else None,
                "contains non-unique items, please remove duplicates from {}".format(
                    context.instance),
            )

        if context.path:
            return (
                path_string(context.path),
                "contains {}, which is an invalid type, it should be {}".format(
                    json.dumps(context.instance),
                    _parse_valid_types_from_validator(context.validator_value)),
            )

        if context.validator == 'type':
            # Accumulate candidate types; reported only if no earlier
            # branch produced a more specific message.
            types.append(context.validator_value)

    valid_types = _parse_valid_types_from_validator(types)
    return (None, "contains an invalid type, it should be {}".format(valid_types))
+
+
def process_service_constraint_errors(error, service_name, version):
    """Produce a message for image/build constraint violations on a service.

    Returns None when the error doesn't match any known constraint case.
    """
    instance = error.instance
    has_image = 'image' in instance

    if version == V1:
        if has_image and 'build' in instance:
            return (
                "Service {} has both an image and build path specified. "
                "A service can either be built to image or use an existing "
                "image, not both.".format(service_name))

        if has_image and 'dockerfile' in instance:
            return (
                "Service {} has both an image and alternate Dockerfile. "
                "A service can either be built to image or use an existing "
                "image, not both.".format(service_name))

    if not has_image and 'build' not in instance:
        return (
            "Service {} has neither an image nor a build context specified. "
            "At least one must be provided.".format(service_name))
+
+
def process_config_schema_errors(error):
    """Format one schema validation error, preferring the schema-id-aware
    handler and falling back to the generic one."""
    path = list(error.path)

    if 'id' in error.schema:
        message = handle_error_for_schema_with_id(error, path)
        if message:
            return message

    return handle_generic_error(error, path)
+
+
def validate_against_config_schema(config_file):
    """Validate the parsed Compose file against the JSON schema matching its
    version; raises ConfigurationError (via handle_errors) with rewritten,
    user-friendly messages on failure."""
    schema = load_jsonschema(config_file)
    # Only 'ports' and 'expose' carry custom format checkers.
    format_checker = FormatChecker(["ports", "expose"])
    validator = Draft4Validator(
        schema,
        resolver=RefResolver(get_resolver_path(), schema),
        format_checker=format_checker)
    handle_errors(
        validator.iter_errors(config_file.config),
        process_config_schema_errors,
        config_file.filename)
+
+
def validate_service_constraints(config, service_name, config_file):
    """Check a single service dict against the version-specific 'constraints'
    sub-schema (e.g. image/build exclusivity); raises ConfigurationError
    (via handle_errors) on violation."""
    def handler(errors):
        # Bind the service name and file version into the error formatter.
        return process_service_constraint_errors(
            errors, service_name, config_file.version)

    schema = load_jsonschema(config_file)
    validator = Draft4Validator(schema['definitions']['constraints']['service'])
    handle_errors(validator.iter_errors(config), handler, None)
+
+
def validate_cpu(service_config):
    """Reject 'cpus' values with more than nine decimal digits: they cannot
    be represented as a whole number of nano-CPUs."""
    cpus = service_config.config.get('cpus')
    if cpus:
        nano_cpus = cpus * NANOCPUS_SCALE
        if isinstance(nano_cpus, float) and not nano_cpus.is_integer():
            raise ConfigurationError(
                "cpus must have nine or less digits after decimal point")
+
+
def get_schema_path():
    """Return the directory containing the bundled JSON schema files
    (i.e. the directory of this module)."""
    return os.path.abspath(os.path.dirname(__file__))
+
+
def load_jsonschema(config_file):
    """Load the JSON schema matching *config_file*'s version.

    Raises ConfigurationError when no schema ships for that version.
    """
    schema_file = os.path.join(
        get_schema_path(),
        "config_schema_v{0}.json".format(config_file.version))

    if not os.path.exists(schema_file):
        raise ConfigurationError(
            'Version in "{}" is unsupported. {}'
            .format(config_file.filename, VERSION_EXPLANATION))

    with open(schema_file, "r") as fh:
        return json.load(fh)
+
+
def get_resolver_path():
    """Return the schema directory as a file: URL for jsonschema's
    RefResolver."""
    schema_path = get_schema_path()
    if sys.platform != "win32":
        return "file://{}/".format(schema_path)
    # Windows: forward slashes, plus an extra slash after the scheme.
    # TODO: why is this necessary?
    return "file:///{}/".format(schema_path.replace('\\', '/'))
+
+
def handle_errors(errors, format_error_func, filename):
    """Collapse raw jsonschema errors into one ConfigurationError.

    Each error is rendered by *format_error_func*; the resulting messages
    are joined into a single exception. No-op when there are no errors.
    """
    errors = sorted(errors, key=str)
    if not errors:
        return

    messages = [format_error_func(error) for error in errors]
    file_msg = " '{}'".format(filename) if filename else ""
    raise ConfigurationError(
        "The Compose file{file_msg} is invalid because:\n{error_msg}".format(
            file_msg=file_msg,
            error_msg='\n'.join(messages)))
diff --git a/compose/const.py b/compose/const.py
new file mode 100644
index 00000000..2ac08b89
--- /dev/null
+++ b/compose/const.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import sys
+
+from .version import ComposeVersion
+
+DEFAULT_TIMEOUT = 10
+HTTP_TIMEOUT = 60
+IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
+IS_WINDOWS_PLATFORM = (sys.platform == "win32")
+LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
+LABEL_ONE_OFF = 'com.docker.compose.oneoff'
+LABEL_PROJECT = 'com.docker.compose.project'
+LABEL_SERVICE = 'com.docker.compose.service'
+LABEL_NETWORK = 'com.docker.compose.network'
+LABEL_VERSION = 'com.docker.compose.version'
+LABEL_VOLUME = 'com.docker.compose.volume'
+LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
+NANOCPUS_SCALE = 1000000000
+
+SECRETS_PATH = '/run/secrets'
+
+COMPOSEFILE_V1 = ComposeVersion('1')
+COMPOSEFILE_V2_0 = ComposeVersion('2.0')
+COMPOSEFILE_V2_1 = ComposeVersion('2.1')
+COMPOSEFILE_V2_2 = ComposeVersion('2.2')
+COMPOSEFILE_V2_3 = ComposeVersion('2.3')
+
+COMPOSEFILE_V3_0 = ComposeVersion('3.0')
+COMPOSEFILE_V3_1 = ComposeVersion('3.1')
+COMPOSEFILE_V3_2 = ComposeVersion('3.2')
+COMPOSEFILE_V3_3 = ComposeVersion('3.3')
+COMPOSEFILE_V3_4 = ComposeVersion('3.4')
+COMPOSEFILE_V3_5 = ComposeVersion('3.5')
+
+API_VERSIONS = {
+ COMPOSEFILE_V1: '1.21',
+ COMPOSEFILE_V2_0: '1.22',
+ COMPOSEFILE_V2_1: '1.24',
+ COMPOSEFILE_V2_2: '1.25',
+ COMPOSEFILE_V2_3: '1.30',
+ COMPOSEFILE_V3_0: '1.25',
+ COMPOSEFILE_V3_1: '1.25',
+ COMPOSEFILE_V3_2: '1.25',
+ COMPOSEFILE_V3_3: '1.30',
+ COMPOSEFILE_V3_4: '1.30',
+ COMPOSEFILE_V3_5: '1.30',
+}
+
+API_VERSION_TO_ENGINE_VERSION = {
+ API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
+ API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
+ API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
+ API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V2_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
+}
diff --git a/compose/container.py b/compose/container.py
new file mode 100644
index 00000000..4bc7f54f
--- /dev/null
+++ b/compose/container.py
@@ -0,0 +1,276 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from functools import reduce
+
+import six
+
+from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_PROJECT
+from .const import LABEL_SERVICE
+
+
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.

    The raw JSON is kept in ``self.dictionary``; data from the cheaper
    `ps` listing is lazily upgraded to a full inspect on first access to
    any field that needs it (see ``get``/``inspect_if_not_inspected``).
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        # client: docker API client; dictionary: raw container JSON.
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected
        self.log_stream = None

    @classmethod
    def from_ps(cls, client, dictionary, **kwargs):
        """
        Construct a container object from the output of GET /containers/json.
        """
        name = get_container_name(dictionary)
        if name is None:
            return None

        # Normalize the `ps` payload to the inspect shape ('/'-prefixed Name).
        new_dictionary = {
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
            'Name': '/' + name,
        }
        return cls(client, new_dictionary, **kwargs)

    @classmethod
    def from_id(cls, client, id):
        # Alternate constructor: inspect by id, so the result is fully loaded.
        return cls(client, client.inspect_container(id), has_been_inspected=True)

    @classmethod
    def create(cls, client, **options):
        # Create the container then fetch its inspect data.
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])

    @property
    def id(self):
        return self.dictionary['Id']

    @property
    def image(self):
        return self.dictionary['Image']

    @property
    def image_config(self):
        # Always a remote call; not cached.
        return self.client.inspect_image(self.image)

    @property
    def short_id(self):
        # First 12 hex chars, as shown by the docker CLI.
        return self.id[:12]

    @property
    def name(self):
        # Inspect data prefixes names with '/'; strip it.
        return self.dictionary['Name'][1:]

    @property
    def service(self):
        return self.labels.get(LABEL_SERVICE)

    @property
    def name_without_project(self):
        # "<service>_<number>" for containers named by this project,
        # otherwise the full (user-assigned) name.
        project = self.labels.get(LABEL_PROJECT)

        if self.name.startswith('{0}_{1}'.format(project, self.service)):
            return '{0}_{1}'.format(self.service, self.number)
        else:
            return self.name

    @property
    def number(self):
        # One-based instance index within the service; only set on
        # compose-managed containers.
        number = self.labels.get(LABEL_CONTAINER_NUMBER)
        if not number:
            raise ValueError("Container {0} does not have a {1} label".format(
                self.short_id, LABEL_CONTAINER_NUMBER))
        return int(number)

    @property
    def ports(self):
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}

    @property
    def human_readable_ports(self):
        # e.g. "0.0.0.0:8000->8000/tcp, 5432/tcp"
        def format_port(private, public):
            if not public:
                return [private]
            return [
                '{HostIp}:{HostPort}->{private}'.format(private=private, **pub)
                for pub in public
            ]

        return ', '.join(
            ','.join(format_port(*item))
            for item in sorted(six.iteritems(self.ports))
        )

    @property
    def labels(self):
        return self.get('Config.Labels') or {}

    @property
    def stop_signal(self):
        return self.get('Config.StopSignal')

    @property
    def log_config(self):
        return self.get('HostConfig.LogConfig') or None

    @property
    def human_readable_state(self):
        # Mirrors the docker CLI status column.
        if self.is_paused:
            return 'Paused'
        if self.is_restarting:
            return 'Restarting'
        if self.is_running:
            return 'Ghost' if self.get('State.Ghost') else 'Up'
        else:
            return 'Exit %s' % self.get('State.ExitCode')

    @property
    def human_readable_command(self):
        entrypoint = self.get('Config.Entrypoint') or []
        cmd = self.get('Config.Cmd') or []
        return ' '.join(entrypoint + cmd)

    @property
    def environment(self):
        # "KEY=value" entries become {KEY: value}; bare "KEY" maps to None.
        def parse_env(var):
            if '=' in var:
                return var.split("=", 1)
            return var, None
        return dict(parse_env(var) for var in self.get('Config.Env') or [])

    @property
    def exit_code(self):
        return self.get('State.ExitCode')

    @property
    def is_running(self):
        return self.get('State.Running')

    @property
    def is_restarting(self):
        return self.get('State.Restarting')

    @property
    def is_paused(self):
        return self.get('State.Paused')

    @property
    def log_driver(self):
        return self.get('HostConfig.LogConfig.Type')

    @property
    def has_api_logs(self):
        # Only these drivers support reading logs back through the API.
        log_type = self.log_driver
        return not log_type or log_type in ('json-file', 'journald')

    def attach_log_stream(self):
        """A log stream can only be attached if the container uses a json-file
        log driver.
        """
        if self.has_api_logs:
            self.log_stream = self.attach(stdout=True, stderr=True, stream=True)

    def get(self, key):
        """Return a value from the container or None if the value is not set.

        :param key: a string using dotted notation for nested dictionary
                    lookups
        """
        self.inspect_if_not_inspected()

        def get_value(dictionary, key):
            # Missing intermediate levels yield None rather than raising.
            return (dictionary or {}).get(key)

        return reduce(get_value, key.split('.'), self.dictionary)

    def get_local_port(self, port, protocol='tcp'):
        # "HostIp:HostPort" for the first binding of the given private port,
        # or None when the port is not published.
        port = self.ports.get("%s/%s" % (port, protocol))
        return "{HostIp}:{HostPort}".format(**port[0]) if port else None

    def get_mount(self, mount_dest):
        # Find the mount whose in-container destination matches, else None.
        for mount in self.get('Mounts'):
            if mount['Destination'] == mount_dest:
                return mount
        return None

    def start(self, **options):
        return self.client.start(self.id, **options)

    def stop(self, **options):
        return self.client.stop(self.id, **options)

    def pause(self, **options):
        return self.client.pause(self.id, **options)

    def unpause(self, **options):
        return self.client.unpause(self.id, **options)

    def kill(self, **options):
        return self.client.kill(self.id, **options)

    def restart(self, **options):
        return self.client.restart(self.id, **options)

    def remove(self, **options):
        return self.client.remove_container(self.id, **options)

    def create_exec(self, command, **options):
        return self.client.exec_create(self.id, command, **options)

    def start_exec(self, exec_id, **options):
        return self.client.exec_start(exec_id, **options)

    def rename_to_tmp_name(self):
        """Rename the container to a hopefully unique temporary container name
        by prepending the short id.
        """
        self.client.rename(
            self.id,
            '%s_%s' % (self.short_id, self.name)
        )

    def inspect_if_not_inspected(self):
        if not self.has_been_inspected:
            self.inspect()

    def wait(self):
        return self.client.wait(self.id)

    def logs(self, *args, **kwargs):
        return self.client.logs(self.id, *args, **kwargs)

    def inspect(self):
        # Refresh (and cache) the full inspect payload.
        self.dictionary = self.client.inspect_container(self.id)
        self.has_been_inspected = True
        return self.dictionary

    def attach(self, *args, **kwargs):
        return self.client.attach(self.id, *args, **kwargs)

    def __repr__(self):
        return '<Container: %s (%s)>' % (self.name, self.id[:6])

    def __eq__(self, other):
        # Containers compare equal by id (same exact type only).
        if type(self) != type(other):
            return False
        return self.id == other.id

    def __hash__(self):
        return self.id.__hash__()
+
+
def get_container_name(container):
    """Extract a container name from either an inspect dict ('Name') or a
    `docker ps` dict ('Names'); return None when neither is present."""
    if not container.get('Name') and not container.get('Names'):
        return None
    # Inspect output carries a single 'Name' entry.
    if 'Name' in container:
        return container['Name']
    # `ps` output lists one name per link; the container's own name is the
    # one with the fewest path segments.
    own_name = min(container['Names'], key=lambda n: len(n.split('/')))
    return own_name.split('/')[-1]
diff --git a/compose/errors.py b/compose/errors.py
new file mode 100644
index 00000000..415b41e7
--- /dev/null
+++ b/compose/errors.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+
class OperationFailedError(Exception):
    """Raised when a container operation (create, start, ...) fails.

    The failure reason is kept on ``msg`` for callers that report it.
    """
    def __init__(self, reason):
        # Also pass the reason to Exception so str(exc) and exc.args carry
        # it; previously they were empty because super().__init__ was skipped.
        super(OperationFailedError, self).__init__(reason)
        self.msg = reason
+
+
class StreamParseError(RuntimeError):
    """Raised when a JSON event stream from the docker API cannot be parsed.

    The parse failure reason is kept on ``msg``.
    """
    def __init__(self, reason):
        # Also pass the reason to RuntimeError so str(exc) and exc.args
        # carry it; previously they were empty.
        super(StreamParseError, self).__init__(reason)
        self.msg = reason
+
+
class HealthCheckException(Exception):
    """Base class for healthcheck-related failures.

    The human-readable reason is kept on ``msg``.
    """
    def __init__(self, reason):
        # Also pass the reason to Exception so str(exc) and exc.args carry
        # it; previously they were empty because super().__init__ was skipped.
        super(HealthCheckException, self).__init__(reason)
        self.msg = reason
+
+
class HealthCheckFailed(HealthCheckException):
    """Raised when a dependency container's healthcheck reports unhealthy."""
    def __init__(self, container_id):
        message = 'Container "{}" is unhealthy.'.format(container_id)
        super(HealthCheckFailed, self).__init__(message)
+
+
class NoHealthCheckConfigured(HealthCheckException):
    """Raised when a service is waited on for health but defines no
    healthcheck."""
    def __init__(self, service_name):
        message = 'Service "{}" is missing a healthcheck configuration'.format(
            service_name)
        super(NoHealthCheckConfigured, self).__init__(message)
diff --git a/compose/network.py b/compose/network.py
new file mode 100644
index 00000000..2e0a7e6e
--- /dev/null
+++ b/compose/network.py
@@ -0,0 +1,286 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+from docker.errors import NotFound
+from docker.types import IPAMConfig
+from docker.types import IPAMPool
+from docker.utils import version_gte
+from docker.utils import version_lt
+
+from .config import ConfigurationError
+from .const import LABEL_NETWORK
+from .const import LABEL_PROJECT
+
+
+log = logging.getLogger(__name__)
+
+OPTS_EXCEPTIONS = [
+ 'com.docker.network.driver.overlay.vxlanid_list',
+ 'com.docker.network.windowsshim.hnsid',
+ 'com.docker.network.windowsshim.networkname'
+]
+
+
class Network(object):
    """A project network: wraps creation, removal and inspection of the
    corresponding Docker network.

    When ``external_name`` is set the network is managed outside the
    project: it is looked up but never created or removed.
    """
    def __init__(self, client, project, name, driver=None, driver_opts=None,
                 ipam=None, external_name=None, internal=False, enable_ipv6=False,
                 labels=None):
        self.client = client
        self.project = project
        self.name = name
        self.driver = driver
        self.driver_opts = driver_opts
        # Normalize the raw ipam mapping into a docker-py IPAMConfig (or None).
        self.ipam = create_ipam_config_from_dict(ipam)
        self.external_name = external_name
        self.internal = internal
        self.enable_ipv6 = enable_ipv6
        self.labels = labels

    def ensure(self):
        """Make sure the network exists: verify external networks, and
        create or config-check project-owned ones.

        Raises ConfigurationError when an external network is missing, and
        NetworkConfigChangedError (via check_remote_network_config) when an
        existing network's config diverges from the local definition.
        """
        if self.external_name:
            try:
                self.inspect()
                log.debug(
                    'Network {0} declared as external. No new '
                    'network will be created.'.format(self.name)
                )
            except NotFound:
                raise ConfigurationError(
                    'Network {name} declared as external, but could'
                    ' not be found. Please create the network manually'
                    ' using `{command} {name}` and try again.'.format(
                        name=self.external_name,
                        command='docker network create'
                    )
                )
            return

        try:
            data = self.inspect()
            check_remote_network_config(data, self)
        except NotFound:
            driver_name = 'the default driver'
            if self.driver:
                driver_name = 'driver "{}"'.format(self.driver)

            log.info(
                'Creating network "{}" with {}'
                .format(self.full_name, driver_name)
            )

            self.client.create_network(
                name=self.full_name,
                driver=self.driver,
                options=self.driver_opts,
                ipam=self.ipam,
                internal=self.internal,
                enable_ipv6=self.enable_ipv6,
                labels=self._labels,
                # attachable requires API >= 1.24; pass None (omit) otherwise.
                attachable=version_gte(self.client._version, '1.24') or None,
                check_duplicate=True,
            )

    def remove(self):
        # External networks are never removed by the project.
        if self.external_name:
            log.info("Network %s is external, skipping", self.full_name)
            return

        log.info("Removing network {}".format(self.full_name))
        self.client.remove_network(self.full_name)

    def inspect(self):
        return self.client.inspect_network(self.full_name)

    @property
    def full_name(self):
        # External networks keep their own name; otherwise scope by project.
        if self.external_name:
            return self.external_name
        return '{0}_{1}'.format(self.project, self.name)

    @property
    def _labels(self):
        # Engine API < 1.23 does not support network labels.
        if version_lt(self.client._version, '1.23'):
            return None
        labels = self.labels.copy() if self.labels else {}
        # Tag the network so compose can identify it later.
        labels.update({
            LABEL_PROJECT: self.project,
            LABEL_NETWORK: self.name,
        })
        return labels
+
+
def create_ipam_config_from_dict(ipam_dict):
    """Convert an 'ipam' config mapping into a docker-py IPAMConfig,
    or None when no mapping is given."""
    if not ipam_dict:
        return None

    pool_configs = [
        IPAMPool(
            subnet=pool.get('subnet'),
            iprange=pool.get('ip_range'),
            gateway=pool.get('gateway'),
            aux_addresses=pool.get('aux_addresses'),
        )
        for pool in ipam_dict.get('config', [])
    ]

    return IPAMConfig(
        driver=ipam_dict.get('driver'),
        pool_configs=pool_configs,
        options=ipam_dict.get('options'),
    )
+
+
class NetworkConfigChangedError(ConfigurationError):
    """Signals that an existing network's remote configuration differs from
    the local definition and the network must be recreated."""
    def __init__(self, net_name, property_name):
        message = 'Network "{}" needs to be recreated - {} has changed'.format(
            net_name, property_name)
        super(NetworkConfigChangedError, self).__init__(message)
+
+
def check_remote_ipam_config(remote, local):
    """Compare the IPAM section of a remote (inspected) network against the
    local definition.

    :param remote: dict as returned by the engine's network inspect
    :param local: Network object holding the local configuration
    :raises NetworkConfigChangedError: when the IPAM driver, a pool config
        (subnet/gateway/ip_range/aux_addresses) or an IPAM option differs
    """
    def get_subnet(pool):
        # Pools may omit 'Subnet'; sort such entries first.
        return pool.get('Subnet') or ''

    remote_ipam = remote.get('IPAM')
    ipam_dict = create_ipam_config_from_dict(local.ipam)
    if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
        raise NetworkConfigChangedError(local.full_name, 'IPAM driver')
    if len(ipam_dict['Config']) != 0:
        if len(ipam_dict['Config']) != len(remote_ipam['Config']):
            raise NetworkConfigChangedError(local.full_name, 'IPAM configs')
        # BUG FIX: sorted()'s key must be a callable; the previous
        # key='Subnet' raised TypeError as soon as pool configs were compared.
        remote_configs = sorted(remote_ipam['Config'], key=get_subnet)
        local_configs = sorted(ipam_dict['Config'], key=get_subnet)
        while local_configs:
            lc = local_configs.pop()
            rc = remote_configs.pop()
            if lc.get('Subnet') != rc.get('Subnet'):
                raise NetworkConfigChangedError(local.full_name, 'IPAM config subnet')
            if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
                raise NetworkConfigChangedError(local.full_name, 'IPAM config gateway')
            if lc.get('IPRange') != rc.get('IPRange'):
                raise NetworkConfigChangedError(local.full_name, 'IPAM config ip_range')
            # Guard against a missing AuxiliaryAddresses key on either side:
            # sorted(None) would raise TypeError.
            if sorted(lc.get('AuxiliaryAddresses') or []) != sorted(rc.get('AuxiliaryAddresses') or []):
                raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')

    remote_opts = remote_ipam.get('Options') or {}
    local_opts = local.ipam.get('options') or {}
    for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
        if remote_opts.get(k) != local_opts.get(k):
            raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
+
+
def check_remote_network_config(remote, local):
    """Compare an existing (inspected) network against the local definition.

    :param remote: dict as returned by the engine's network inspect
    :param local: Network object holding the local configuration
    :raises NetworkConfigChangedError: when driver, driver options, IPAM,
        internal or enable_ipv6 differ; label differences only log a warning
    """
    if local.driver and remote.get('Driver') != local.driver:
        raise NetworkConfigChangedError(local.full_name, 'driver')
    local_opts = local.driver_opts or {}
    remote_opts = remote.get('Options') or {}
    for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
        # Engine-managed options that legitimately differ are ignored.
        if k in OPTS_EXCEPTIONS:
            continue
        if remote_opts.get(k) != local_opts.get(k):
            raise NetworkConfigChangedError(local.full_name, 'option "{}"'.format(k))

    if local.ipam is not None:
        check_remote_ipam_config(remote, local)

    if local.internal is not None and local.internal != remote.get('Internal', False):
        raise NetworkConfigChangedError(local.full_name, 'internal')
    if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
        raise NetworkConfigChangedError(local.full_name, 'enable_ipv6')

    local_labels = local.labels or {}
    remote_labels = remote.get('Labels', {})
    for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
        if k.startswith('com.docker.'):  # We are only interested in user-specified labels
            continue
        if remote_labels.get(k) != local_labels.get(k):
            # Label drift is tolerated: warn instead of forcing a recreate.
            log.warn(
                'Network {}: label "{}" has changed. It may need to be'
                ' recreated.'.format(local.full_name, k)
            )
+ )
+
+
def build_networks(name, config_data, client):
    """Instantiate a Network for every network declared in the config,
    always including a 'default' network for the project."""
    networks = {}
    for network_name, data in (config_data.networks or {}).items():
        networks[network_name] = Network(
            client=client, project=name, name=network_name,
            driver=data.get('driver'),
            driver_opts=data.get('driver_opts'),
            ipam=data.get('ipam'),
            external_name=data.get('external_name'),
            internal=data.get('internal'),
            enable_ipv6=data.get('enable_ipv6'),
            labels=data.get('labels'),
        )

    if 'default' not in networks:
        networks['default'] = Network(client, name, 'default')

    return networks
+
+
class ProjectNetworks(object):
    """The set of networks a project actually uses.

    When ``use_networking`` is False (compose file v1), all operations are
    no-ops.
    """

    def __init__(self, networks, use_networking):
        self.networks = networks or {}
        self.use_networking = use_networking

    @classmethod
    def from_services(cls, services, networks, use_networking):
        """Build from service definitions, keeping only networks that are
        referenced by at least one service; warns about unused ones."""
        service_networks = {
            network: networks.get(network)
            for service in services
            for network in get_network_names_for_service(service)
        }
        # 'default' existing but unused is expected, so don't warn about it.
        unused = set(networks) - set(service_networks) - {'default'}
        if unused:
            log.warn(
                "Some networks were defined but are not used by any service: "
                "{}".format(", ".join(unused)))
        return cls(service_networks, use_networking)

    def remove(self):
        # Remove each network; tolerate ones that are already gone.
        if not self.use_networking:
            return
        for network in self.networks.values():
            try:
                network.remove()
            except NotFound:
                log.warn("Network %s not found.", network.full_name)

    def initialize(self):
        # Ensure every used network exists (creating it if needed).
        if not self.use_networking:
            return

        for network in self.networks.values():
            network.ensure()
+
+
def get_network_defs_for_service(service_dict):
    """Return {network_name: per-service net config} for a service.

    An empty dict when 'network_mode' is set (which excludes network
    attachment); a bare 'default' entry when no networks are listed;
    None configs are normalized to empty dicts.
    """
    if 'network_mode' in service_dict:
        return {}
    networks = service_dict.get('networks', {'default': None})
    return {net: config or {} for net, config in networks.items()}
+
+
def get_network_names_for_service(service_dict):
    """Names of the networks the service attaches to."""
    network_defs = get_network_defs_for_service(service_dict)
    return network_defs.keys()
+
+
def get_networks(service_dict, network_definitions):
    """Map each of the service's networks to its per-service net config,
    keyed by the network's full (project-scoped) name.

    Raises ConfigurationError when a referenced network is not defined.
    """
    networks = {}
    for name, netdef in get_network_defs_for_service(service_dict).items():
        network = network_definitions.get(name)
        if network is None:
            raise ConfigurationError(
                'Service "{}" uses an undefined network "{}"'
                .format(service_dict['name'], name))
        networks[network.full_name] = netdef

    return networks
diff --git a/compose/parallel.py b/compose/parallel.py
new file mode 100644
index 00000000..d455711d
--- /dev/null
+++ b/compose/parallel.py
@@ -0,0 +1,298 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import operator
+import sys
+from threading import Semaphore
+from threading import Thread
+
+from docker.errors import APIError
+from six.moves import _thread as thread
+from six.moves.queue import Empty
+from six.moves.queue import Queue
+
+from compose.cli.colors import green
+from compose.cli.colors import red
+from compose.cli.signals import ShutdownException
+from compose.errors import HealthCheckFailed
+from compose.errors import NoHealthCheckConfigured
+from compose.errors import OperationFailedError
+from compose.utils import get_output_stream
+
+
+log = logging.getLogger(__name__)
+
+STOP = object()
+
+
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
    """Runs func on objects in parallel while ensuring that func is
    ran on object only after it is ran on all its dependencies.

    get_deps called on object must return a collection with its dependencies.
    get_name called on object must return its name.

    :param msg: progress-line prefix; None suppresses progress output
    :param limit: max concurrent producer threads (None for unlimited)
    :return: (results, errors) tuple - results of successful calls, and a
        {name: error} mapping for failures
    :raises: re-raises the first unexpected (unclassified) exception
    """
    objects = list(objects)
    stream = get_output_stream(sys.stderr)

    writer = ParallelStreamWriter(stream, msg)
    for obj in objects:
        writer.add_object(get_name(obj))
    writer.write_initial()

    events = parallel_execute_iter(objects, func, get_deps, limit)

    errors = {}
    results = []
    error_to_reraise = None

    for obj, result, exception in events:
        if exception is None:
            writer.write(get_name(obj), 'done', green)
            results.append(result)
        elif isinstance(exception, APIError):
            errors[get_name(obj)] = exception.explanation
            writer.write(get_name(obj), 'error', red)
        elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
            errors[get_name(obj)] = exception.msg
            writer.write(get_name(obj), 'error', red)
        elif isinstance(exception, UpstreamError):
            # A dependency failed; its own error is already recorded.
            writer.write(get_name(obj), 'error', red)
        else:
            # Unexpected exception: record it and re-raise after the loop
            # so all progress lines are finalized first.
            errors[get_name(obj)] = exception
            error_to_reraise = exception

    for obj_name, error in errors.items():
        stream.write("\nERROR: for {} {}\n".format(obj_name, error))

    if error_to_reraise:
        raise error_to_reraise

    return results, errors
+
+
+def _no_deps(x):
+ return []
+
+
class State(object):
    """Tracks the progress of a partially-complete parallel operation.

    started -- objects currently being processed
    finished -- objects that have been processed successfully
    failed -- objects that failed, or whose dependencies failed
    """
    def __init__(self, objects):
        self.objects = objects
        self.started = set()
        self.finished = set()
        self.failed = set()

    def is_done(self):
        """True once every object has either finished or failed."""
        settled = len(self.finished) + len(self.failed)
        return settled >= len(self.objects)

    def pending(self):
        """Objects that have not yet been started, finished or failed."""
        remaining = set(self.objects) - self.started
        return remaining - self.finished - self.failed
+
+
class NoLimit(object):
    """Null context manager used when no concurrency limit is requested;
    drop-in for a Semaphore that never blocks."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
+
+
def parallel_execute_iter(objects, func, get_deps, limit):
    """
    Runs func on objects in parallel while ensuring that func is
    ran on object only after it is ran on all its dependencies.

    Returns an iterator of tuples which look like:

    # if func returned normally when run on object
    (object, result, None)

    # if func raised an exception when run on object
    (object, None, exception)

    # if func raised an exception when run on one of object's dependencies
    (object, None, UpstreamError())
    """
    if get_deps is None:
        get_deps = _no_deps

    # limiter bounds the number of concurrent producer threads.
    if limit is None:
        limiter = NoLimit()
    else:
        limiter = Semaphore(limit)

    results = Queue()
    state = State(objects)

    while True:
        # (Re)start producers for anything whose deps are satisfied;
        # feed_queue enqueues the STOP sentinel once all objects settle.
        feed_queue(objects, func, get_deps, results, state, limiter)

        try:
            # Short timeout so newly-unblocked objects get scheduled promptly.
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if event is STOP:
            break

        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)

        yield event
+
+
def producer(obj, func, results, limiter):
    """Worker-thread entry point: run *func* on *obj* while holding
    *limiter*, then report an (obj, result, exception) tuple on the
    *results* queue."""
    with limiter:
        try:
            outcome = (obj, func(obj), None)
        except Exception as e:
            outcome = (obj, None, e)
        results.put(outcome)
+
+
def feed_queue(objects, func, get_deps, results, state, limiter):
    """
    Starts producer threads for any objects which are ready to be processed
    (i.e. they have no dependencies which haven't been successfully processed).

    Shortcuts any objects whose dependencies have failed and places an
    (object, None, UpstreamError()) tuple on the results queue.

    NOTE: get_deps returns (dependency, ready_check) pairs; ready_check,
    when not falsy, is called with the dependency and may itself raise
    HealthCheckFailed / NoHealthCheckConfigured.
    """
    pending = state.pending()
    log.debug('Pending: {}'.format(pending))

    for obj in pending:
        deps = get_deps(obj)
        try:
            # dep[0] is the dependency object in each (dep, ready_check) pair.
            if any(dep[0] in state.failed for dep in deps):
                log.debug('{} has upstream errors - not processing'.format(obj))
                results.put((obj, None, UpstreamError()))
                state.failed.add(obj)
            elif all(
                # Deps outside the working set are assumed satisfied.
                dep not in objects or (
                    dep in state.finished and (not ready_check or ready_check(dep))
                ) for dep, ready_check in deps
            ):
                log.debug('Starting producer thread for {}'.format(obj))
                # Daemon threads so a shutdown doesn't hang on workers.
                t = Thread(target=producer, args=(obj, func, results, limiter))
                t.daemon = True
                t.start()
                state.started.add(obj)
        except (HealthCheckFailed, NoHealthCheckConfigured) as e:
            log.debug(
                'Healthcheck for service(s) upstream of {} failed - '
                'not processing'.format(obj)
            )
            results.put((obj, None, e))

    if state.is_done():
        # Everything settled; tell the consumer loop to stop.
        results.put(STOP)
+
+
class UpstreamError(Exception):
    """Marks an object that was skipped because one of its dependencies
    failed."""
+
+
class ParallelStreamWriter(object):
    """Write out messages for operations happening in parallel.

    Each operation has its own line, and ANSI code characters are used
    to jump to the correct line, and write over the line.
    """

    # Class-wide switch: when True, append-only output is used (no cursor
    # movement) - suitable for dumb terminals or redirected output.
    noansi = False

    @classmethod
    def set_noansi(cls, value=True):
        cls.noansi = value

    def __init__(self, stream, msg):
        self.stream = stream
        self.msg = msg        # line prefix; None disables all output
        self.lines = []       # object names, in display order
        self.width = 0        # widest name seen, for column alignment

    def add_object(self, obj_index):
        self.lines.append(obj_index)
        self.width = max(self.width, len(obj_index))

    def write_initial(self):
        # Emit one "... " placeholder line per registered object.
        if self.msg is None:
            return
        for line in self.lines:
            self.stream.write("{} {:<{width}} ... \r\n".format(self.msg, line,
                              width=self.width))
        self.stream.flush()

    def _write_ansi(self, obj_index, status):
        position = self.lines.index(obj_index)
        diff = len(self.lines) - position
        # move up (ESC [ n A) to this object's line
        self.stream.write("%c[%dA" % (27, diff))
        # erase (ESC [ 2 K) the whole line
        self.stream.write("%c[2K\r" % 27)
        self.stream.write("{} {:<{width}} ... {}\r".format(self.msg, obj_index,
                          status, width=self.width))
        # move back down (ESC [ n B) to the bottom of the block
        self.stream.write("%c[%dB" % (27, diff))
        self.stream.flush()

    def _write_noansi(self, obj_index, status):
        # Plain append: one new line per status update.
        self.stream.write("{} {:<{width}} ... {}\r\n".format(self.msg, obj_index,
                          status, width=self.width))
        self.stream.flush()

    def write(self, obj_index, status, color_func):
        if self.msg is None:
            return
        if self.noansi:
            # Colors would garble non-ANSI output, so skip color_func here.
            self._write_noansi(obj_index, status)
        else:
            self._write_ansi(obj_index, color_func(status))
+
+
def parallel_operation(containers, operation, options, message):
    """Invoke the named container method on every container in parallel."""
    call_operation = operator.methodcaller(operation, **options)
    get_container_name = operator.attrgetter('name')
    parallel_execute(containers, call_operation, get_container_name, message)
+
+
def parallel_remove(containers, options):
    """Remove all stopped containers in parallel; running ones are skipped."""
    stopped = [container for container in containers if not container.is_running]
    parallel_operation(stopped, 'remove', options, 'Removing')
+
+
def parallel_pause(containers, options):
    """Pause the given containers in parallel."""
    parallel_operation(containers, 'pause', options, 'Pausing')
+
+
def parallel_unpause(containers, options):
    """Unpause the given containers in parallel."""
    parallel_operation(containers, 'unpause', options, 'Unpausing')
+
+
def parallel_kill(containers, options):
    """Kill the given containers in parallel."""
    parallel_operation(containers, 'kill', options, 'Killing')
diff --git a/compose/progress_stream.py b/compose/progress_stream.py
new file mode 100644
index 00000000..5314f89f
--- /dev/null
+++ b/compose/progress_stream.py
@@ -0,0 +1,111 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import utils
+
+
class StreamOutputError(Exception):
    """Raised when a docker output stream carries an errorDetail event."""
+
+
def stream_output(output, stream):
    """Write a docker JSON event stream to *stream*, rendering in-place
    progress bars when the stream is attached to a TTY.

    Returns the list of all decoded events. Error events raise
    StreamOutputError (via print_output_event).
    """
    is_terminal = hasattr(stream, 'isatty') and stream.isatty()
    stream = utils.get_output_stream(stream)
    all_events = []
    # image id -> allocation order; used to locate each image's status line
    lines = {}
    diff = 0

    for event in utils.json_stream(output):
        all_events.append(event)
        is_progress_event = 'progress' in event or 'progressDetail' in event

        if not is_progress_event:
            print_output_event(event, stream, is_terminal)
            stream.flush()
            continue

        if not is_terminal:
            continue

        # if it's a progress event and we have a terminal, then display the progress bars
        image_id = event.get('id')
        if not image_id:
            continue

        if image_id not in lines:
            # first progress event for this image: allocate a fresh line
            lines[image_id] = len(lines)
            stream.write("\n")

        diff = len(lines) - lines[image_id]

        # move cursor up `diff` rows
        stream.write("%c[%dA" % (27, diff))

        print_output_event(event, stream, is_terminal)

        if 'id' in event:
            # move cursor back down
            stream.write("%c[%dB" % (27, diff))

        stream.flush()

    return all_events
+
+
def print_output_event(event, stream, is_terminal):
    """Render a single docker JSON event onto *stream*.

    Raises StreamOutputError for events that carry an errorDetail.
    """
    if 'errorDetail' in event:
        raise StreamOutputError(event['errorDetail']['message'])

    terminator = ''

    if is_terminal and 'stream' not in event:
        # erase the current line so the status can be redrawn in place
        stream.write("\x1b[2K\r")
        terminator = "\r"
    elif 'progressDetail' in event:
        # progress-only events are useless without a terminal
        return

    # optional prefixes, in fixed order
    for key, fmt in (('time', "[%s] "), ('id', "%s: "), ('from', "(from %s) ")):
        if key in event:
            stream.write(fmt % event[key])

    status = event.get('status', '')

    if 'progress' in event:
        stream.write("%s %s%s" % (status, event['progress'], terminator))
    elif 'progressDetail' in event:
        detail = event['progressDetail']
        total = detail.get('total')
        if 'current' in detail and total:
            percentage = float(detail['current']) / float(total) * 100
            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
        else:
            stream.write('%s%s' % (status, terminator))
    elif 'stream' in event:
        stream.write("%s%s" % (event['stream'], terminator))
    else:
        stream.write("%s%s\n" % (status, terminator))
+
+
def get_digest_from_pull(events):
    """Return the image digest reported by a pull event stream, or None."""
    for event in events:
        status = event.get('status')
        if status and 'Digest' in status:
            _, digest = status.split(':', 1)
            return digest.strip()
    return None
+
+
def get_digest_from_push(events):
    """Return the image digest reported by a push event stream, or None."""
    for event in events:
        aux = event.get('aux', {})
        if aux.get('Digest'):
            return aux['Digest']
    return None
diff --git a/compose/project.py b/compose/project.py
new file mode 100644
index 00000000..c8b57edd
--- /dev/null
+++ b/compose/project.py
@@ -0,0 +1,674 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import datetime
+import logging
+import operator
+from functools import reduce
+
+import enum
+from docker.errors import APIError
+
+from . import parallel
+from .config import ConfigurationError
+from .config.config import V1
+from .config.sort_services import get_container_name_from_network_mode
+from .config.sort_services import get_service_name_from_network_mode
+from .const import IMAGE_EVENTS
+from .const import LABEL_ONE_OFF
+from .const import LABEL_PROJECT
+from .const import LABEL_SERVICE
+from .container import Container
+from .network import build_networks
+from .network import get_networks
+from .network import ProjectNetworks
+from .service import BuildAction
+from .service import ContainerNetworkMode
+from .service import ContainerPidMode
+from .service import ConvergenceStrategy
+from .service import NetworkMode
+from .service import PidMode
+from .service import Service
+from .service import ServiceNetworkMode
+from .service import ServicePidMode
+from .utils import microseconds_from_time_nano
+from .volume import ProjectVolumes
+
+
+log = logging.getLogger(__name__)
+
+
@enum.unique
class OneOffFilter(enum.Enum):
    """Filter for one-off (`docker-compose run`) containers: include them
    alongside normal containers, exclude them, or match only them."""
    include = 0
    exclude = 1
    only = 2

    @classmethod
    def update_labels(cls, value, labels):
        """Append the label filter matching *value* to *labels* (in place)."""
        if value == cls.include:
            return
        if value == cls.only:
            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
        elif value == cls.exclude:
            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
        else:
            raise ValueError("Invalid value for one_off: {}".format(repr(value)))
+
+
class Project(object):
    """
    A collection of services.
    """
    def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
        # name: project prefix used in container names and labels
        # services: list of compose.service.Service
        # client: docker API client shared by every service
        self.name = name
        self.services = services
        self.client = client
        self.volumes = volumes or ProjectVolumes({})
        self.networks = networks or ProjectNetworks({}, False)
        self.config_version = config_version
+
+ def labels(self, one_off=OneOffFilter.exclude):
+ labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
+
+ OneOffFilter.update_labels(one_off, labels)
+ return labels
+
    @classmethod
    def from_config(cls, name, config_data, client):
        """
        Construct a Project from a config.Config object.
        """
        # Compose file format v1 predates user-defined networks.
        use_networking = (config_data.version and config_data.version != V1)
        networks = build_networks(name, config_data, client)
        project_networks = ProjectNetworks.from_services(
            config_data.services,
            networks,
            use_networking)
        volumes = ProjectVolumes.from_config(name, config_data, client)
        project = cls(name, [], client, project_networks, volumes, config_data.version)

        for service_dict in config_data.services:
            service_dict = dict(service_dict)  # copy: the helpers below pop keys
            if use_networking:
                service_networks = get_networks(service_dict, networks)
            else:
                service_networks = {}

            service_dict.pop('networks', None)
            links = project.get_links(service_dict)
            network_mode = project.get_network_mode(
                service_dict, list(service_networks.keys())
            )
            pid_mode = project.get_pid_mode(service_dict)
            volumes_from = get_volumes_from(project, service_dict)

            if config_data.version != V1:
                # qualify named volumes with the project namespace
                service_dict['volumes'] = [
                    volumes.namespace_spec(volume_spec)
                    for volume_spec in service_dict.get('volumes', [])
                ]

            secrets = get_secrets(
                service_dict['name'],
                service_dict.pop('secrets', None) or [],
                config_data.secrets)

            project.services.append(
                Service(
                    service_dict.pop('name'),
                    client=client,
                    project=name,
                    use_networking=use_networking,
                    networks=service_networks,
                    links=links,
                    network_mode=network_mode,
                    volumes_from=volumes_from,
                    secrets=secrets,
                    pid_mode=pid_mode,
                    **service_dict)
            )

        return project
+
+ @property
+ def service_names(self):
+ return [service.name for service in self.services]
+
+ def get_service(self, name):
+ """
+ Retrieve a service by name. Raises NoSuchService
+ if the named service does not exist.
+ """
+ for service in self.services:
+ if service.name == name:
+ return service
+
+ raise NoSuchService(name)
+
+ def validate_service_names(self, service_names):
+ """
+ Validate that the given list of service names only contains valid
+ services. Raises NoSuchService if one of the names is invalid.
+ """
+ valid_names = self.service_names
+ for name in service_names:
+ if name not in valid_names:
+ raise NoSuchService(name)
+
+ def get_services(self, service_names=None, include_deps=False):
+ """
+ Returns a list of this project's services filtered
+ by the provided list of names, or all services if service_names is None
+ or [].
+
+ If include_deps is specified, returns a list including the dependencies for
+ service_names, in order of dependency.
+
+ Preserves the original order of self.services where possible,
+ reordering as needed to resolve dependencies.
+
+ Raises NoSuchService if any of the named services do not exist.
+ """
+ if service_names is None or len(service_names) == 0:
+ service_names = self.service_names
+
+ unsorted = [self.get_service(name) for name in service_names]
+ services = [s for s in self.services if s in unsorted]
+
+ if include_deps:
+ services = reduce(self._inject_deps, services, [])
+
+ uniques = []
+ [uniques.append(s) for s in services if s not in uniques]
+
+ return uniques
+
+ def get_services_without_duplicate(self, service_names=None, include_deps=False):
+ services = self.get_services(service_names, include_deps)
+ for service in services:
+ service.remove_duplicate_containers()
+ return services
+
+ def get_links(self, service_dict):
+ links = []
+ if 'links' in service_dict:
+ for link in service_dict.get('links', []):
+ if ':' in link:
+ service_name, link_name = link.split(':', 1)
+ else:
+ service_name, link_name = link, None
+ try:
+ links.append((self.get_service(service_name), link_name))
+ except NoSuchService:
+ raise ConfigurationError(
+ 'Service "%s" has a link to service "%s" which does not '
+ 'exist.' % (service_dict['name'], service_name))
+ del service_dict['links']
+ return links
+
    def get_network_mode(self, service_dict, networks):
        """Resolve a service's 'network_mode' (popped from service_dict) into
        a *NetworkMode object; defaults to the first attached network (or
        'none') when networking is enabled."""
        network_mode = service_dict.pop('network_mode', None)
        if not network_mode:
            if self.networks.use_networking:
                return NetworkMode(networks[0]) if networks else NetworkMode('none')
            return NetworkMode(None)

        service_name = get_service_name_from_network_mode(network_mode)
        if service_name:
            # "service:<name>" - share the network stack of another service
            return ServiceNetworkMode(self.get_service(service_name))

        container_name = get_container_name_from_network_mode(network_mode)
        if container_name:
            # "container:<name>" - share the stack of an existing container
            try:
                return ContainerNetworkMode(Container.from_id(self.client, container_name))
            except APIError:
                raise ConfigurationError(
                    "Service '{name}' uses the network stack of container '{dep}' which "
                    "does not exist.".format(name=service_dict['name'], dep=container_name))

        return NetworkMode(network_mode)
+
    def get_pid_mode(self, service_dict):
        """Resolve a service's 'pid' setting (popped from service_dict) into a
        *PidMode object; mirrors get_network_mode."""
        pid_mode = service_dict.pop('pid', None)
        if not pid_mode:
            return PidMode(None)

        service_name = get_service_name_from_network_mode(pid_mode)
        if service_name:
            # "service:<name>" - share the PID namespace of another service
            return ServicePidMode(self.get_service(service_name))

        container_name = get_container_name_from_network_mode(pid_mode)
        if container_name:
            # "container:<name>" - share the namespace of an existing container
            try:
                return ContainerPidMode(Container.from_id(self.client, container_name))
            except APIError:
                raise ConfigurationError(
                    "Service '{name}' uses the PID namespace of container '{dep}' which "
                    "does not exist.".format(name=service_dict['name'], dep=container_name)
                )

        return PidMode(pid_mode)
+
    def start(self, service_names=None, **options):
        """Start existing containers for the given services (all by default),
        honoring inter-service dependency order. Returns started containers."""
        containers = []

        def start_service(service):
            service_containers = service.start(quiet=True, **options)
            containers.extend(service_containers)

        services = self.get_services(service_names)

        def get_deps(service):
            # (dependency Service, dependency config) pairs for the scheduler
            return {
                (self.get_service(dep), config)
                for dep, config in service.get_dependency_configs().items()
            }

        parallel.parallel_execute(
            services,
            start_service,
            operator.attrgetter('name'),
            'Starting',
            get_deps,
        )

        return containers
+
    def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options):
        """Stop the selected containers, dependents before dependencies."""
        containers = self.containers(service_names, one_off=one_off)

        def get_deps(container):
            # actually returning inversed dependencies
            return {(other, None) for other in containers
                    if container.service in
                    self.get_service(other.service).get_dependency_names()}

        parallel.parallel_execute(
            containers,
            self.build_container_operation_with_timeout_func('stop', options),
            operator.attrgetter('name'),
            'Stopping',
            get_deps,
        )
+
+ def pause(self, service_names=None, **options):
+ containers = self.containers(service_names)
+ parallel.parallel_pause(reversed(containers), options)
+ return containers
+
+ def unpause(self, service_names=None, **options):
+ containers = self.containers(service_names)
+ parallel.parallel_unpause(containers, options)
+ return containers
+
+ def kill(self, service_names=None, **options):
+ parallel.parallel_kill(self.containers(service_names), options)
+
+ def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options):
+ parallel.parallel_remove(self.containers(
+ service_names, stopped=True, one_off=one_off
+ ), options)
+
    def down(self, remove_image_type, include_volumes, remove_orphans=False):
        """Stop and remove containers and networks, optionally volumes and
        images (scope controlled by *remove_image_type*)."""
        self.stop(one_off=OneOffFilter.include)
        self.find_orphan_containers(remove_orphans)
        self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)

        self.networks.remove()

        if include_volumes:
            self.volumes.remove()

        self.remove_images(remove_image_type)
+
+ def remove_images(self, remove_image_type):
+ for service in self.get_services():
+ service.remove_image(remove_image_type)
+
+ def restart(self, service_names=None, **options):
+ containers = self.containers(service_names, stopped=True)
+
+ parallel.parallel_execute(
+ containers,
+ self.build_container_operation_with_timeout_func('restart', options),
+ operator.attrgetter('name'),
+ 'Restarting',
+ )
+ return containers
+
+ def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
+ for service in self.get_services(service_names):
+ if service.can_be_built():
+ service.build(no_cache, pull, force_rm, build_args)
+ else:
+ log.info('%s uses an image, skipping' % service.name)
+
    def create(
        self,
        service_names=None,
        strategy=ConvergenceStrategy.changed,
        do_build=BuildAction.none,
    ):
        """Create containers (detached and not started) for the selected
        services and their dependencies."""
        services = self.get_services_without_duplicate(service_names, include_deps=True)

        for svc in services:
            svc.ensure_image_exists(do_build=do_build)
        plans = self._get_convergence_plans(services, strategy)

        for service in services:
            service.execute_convergence_plan(
                plans[service.name],
                detached=True,
                start=False)
+
    def events(self, service_names=None):
        """Yield normalized container events for this project's services."""
        def build_container_event(event, container):
            time = datetime.datetime.fromtimestamp(event['time'])
            time = time.replace(
                microsecond=microseconds_from_time_nano(event['timeNano']))
            return {
                'time': time,
                'type': 'container',
                'action': event['status'],
                'id': container.id,
                'service': container.service,
                'attributes': {
                    'name': container.name,
                    'image': event['from'],
                },
                'container': container,
            }

        service_names = set(service_names or self.service_names)
        for event in self.client.events(
            filters={'label': self.labels()},
            decode=True
        ):
            # The first part of this condition is a guard against some events
            # broadcasted by swarm that don't have a status field.
            # See https://github.com/docker/compose/issues/3316
            if 'status' not in event or event['status'] in IMAGE_EVENTS:
                # We don't receive any image events because labels aren't applied
                # to images
                continue

            # TODO: get labels from the API v1.22 , see github issue 2618
            try:
                # this can fail if the container has been removed
                container = Container.from_id(self.client, event['id'])
            except APIError:
                continue
            if container.service not in service_names:
                continue
            yield build_container_event(event, container)
+
    def up(self,
           service_names=None,
           start_deps=True,
           strategy=ConvergenceStrategy.changed,
           do_build=BuildAction.none,
           timeout=None,
           detached=False,
           remove_orphans=False,
           scale_override=None,
           rescale=True,
           start=True):
        """Create/recreate and (optionally) start the project's services,
        scheduling convergence in dependency order.

        Returns the containers produced by the convergence plans.
        Raises ProjectError when any service fails to come up.
        """
        warn_for_swarm_mode(self.client)

        self.initialize()
        self.find_orphan_containers(remove_orphans)

        if scale_override is None:
            scale_override = {}

        services = self.get_services_without_duplicate(
            service_names,
            include_deps=start_deps)

        for svc in services:
            svc.ensure_image_exists(do_build=do_build)
        plans = self._get_convergence_plans(services, strategy)

        def do(service):
            return service.execute_convergence_plan(
                plans[service.name],
                timeout=timeout,
                detached=detached,
                scale_override=scale_override.get(service.name),
                rescale=rescale,
                start=start
            )

        def get_deps(service):
            return {
                (self.get_service(dep), config)
                for dep, config in service.get_dependency_configs().items()
            }

        results, errors = parallel.parallel_execute(
            services,
            do,
            operator.attrgetter('name'),
            None,
            get_deps,
        )
        if errors:
            raise ProjectError(
                'Encountered errors while bringing up the project.'
            )

        # flatten: one list of containers across all services
        return [
            container
            for svc_containers in results
            if svc_containers is not None
            for container in svc_containers
        ]
+
+ def initialize(self):
+ self.networks.initialize()
+ self.volumes.initialize()
+
    def _get_convergence_plans(self, services, strategy):
        """Compute a ConvergencePlan per service; a service whose dependency
        will be (re)created is itself recreated when the strategy allows."""
        plans = {}

        for service in services:
            updated_dependencies = [
                name
                for name in service.get_dependency_names()
                if name in plans and
                plans[name].action in ('recreate', 'create')
            ]

            if updated_dependencies and strategy.allows_recreate:
                log.debug('%s has upstream changes (%s)',
                          service.name,
                          ", ".join(updated_dependencies))
                plan = service.convergence_plan(ConvergenceStrategy.always)
            else:
                plan = service.convergence_plan(strategy)

            plans[service.name] = plan

        return plans
+
+ def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False):
+ services = self.get_services(service_names, include_deps=False)
+
+ if parallel_pull:
+ def pull_service(service):
+ service.pull(ignore_pull_failures, True)
+
+ _, errors = parallel.parallel_execute(
+ services,
+ pull_service,
+ operator.attrgetter('name'),
+ 'Pulling',
+ limit=5,
+ )
+ if len(errors):
+ raise ProjectError(b"\n".join(errors.values()))
+ else:
+ for service in services:
+ service.pull(ignore_pull_failures, silent=silent)
+
+ def push(self, service_names=None, ignore_push_failures=False):
+ for service in self.get_services(service_names, include_deps=False):
+ service.push(ignore_push_failures)
+
+ def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
+ return list(filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters={'label': self.labels(one_off=one_off)})])
+ )
+
+ def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
+ if service_names:
+ self.validate_service_names(service_names)
+ else:
+ service_names = self.service_names
+
+ containers = self._labeled_containers(stopped, one_off)
+
+ def matches_service_names(container):
+ return container.labels.get(LABEL_SERVICE) in service_names
+
+ return [c for c in containers if matches_service_names(c)]
+
    def find_orphan_containers(self, remove_orphans):
        """Detect labeled containers whose service is no longer in the config;
        remove them or warn, depending on *remove_orphans*."""
        def _find():
            containers = self._labeled_containers()
            for ctnr in containers:
                service_name = ctnr.labels.get(LABEL_SERVICE)
                if service_name not in self.service_names:
                    yield ctnr
        orphans = list(_find())
        if not orphans:
            return
        if remove_orphans:
            for ctnr in orphans:
                log.info('Removing orphan container "{0}"'.format(ctnr.name))
                ctnr.kill()
                ctnr.remove(force=True)
        else:
            log.warning(
                'Found orphan containers ({0}) for this project. If '
                'you removed or renamed this service in your compose '
                'file, you can run this command with the '
                '--remove-orphans flag to clean it up.'.format(
                    ', '.join(["{}".format(ctnr.name) for ctnr in orphans])
                )
            )
+
+ def _inject_deps(self, acc, service):
+ dep_names = service.get_dependency_names()
+
+ if len(dep_names) > 0:
+ dep_services = self.get_services(
+ service_names=list(set(dep_names)),
+ include_deps=True
+ )
+ else:
+ dep_services = []
+
+ dep_services.append(service)
+ return acc + dep_services
+
+ def build_container_operation_with_timeout_func(self, operation, options):
+ def container_operation_with_timeout(container):
+ if options.get('timeout') is None:
+ service = self.get_service(container.service)
+ options['timeout'] = service.stop_timeout(None)
+ return getattr(container, operation)(**options)
+ return container_operation_with_timeout
+
+
def get_volumes_from(project, service_dict):
    """Resolve a service's 'volumes_from' entries (popped from service_dict)
    to Service or Container objects.

    Raises ConfigurationError when a source is neither a known service nor
    an existing container.
    """
    volumes_from = service_dict.pop('volumes_from', None)
    if not volumes_from:
        return []

    def build_volume_from(spec):
        if spec.type == 'service':
            try:
                return spec._replace(source=project.get_service(spec.source))
            except NoSuchService:
                pass

        if spec.type == 'container':
            try:
                container = Container.from_id(project.client, spec.source)
                return spec._replace(source=container)
            except APIError:
                pass

        raise ConfigurationError(
            "Service \"{}\" mounts volumes from \"{}\", which is not the name "
            "of a service or container.".format(
                service_dict['name'],
                spec.source))

    return [build_volume_from(vf) for vf in volumes_from]
+
+
def get_secrets(service, service_secrets, secret_defs):
    """Resolve a service's secret references against the top-level secret
    definitions.

    Returns a list of {'secret': <spec>, 'file': <path>} dicts. External
    secrets and unsupported uid/gid/mode fields only produce warnings
    (using logging's non-deprecated ``warning`` method).
    Raises ConfigurationError for secrets that are not defined at all.
    """
    secrets = []

    for secret in service_secrets:
        secret_def = secret_defs.get(secret.source)
        if not secret_def:
            raise ConfigurationError(
                "Service \"{service}\" uses an undefined secret \"{secret}\" "
                .format(service=service, secret=secret.source))

        if secret_def.get('external_name'):
            log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
                        "External secrets are not available to containers created by "
                        "docker-compose.".format(service=service, secret=secret.source))
            continue

        if secret.uid or secret.gid or secret.mode:
            log.warning(
                "Service \"{service}\" uses secret \"{secret}\" with uid, "
                "gid, or mode. These fields are not supported by this "
                "implementation of the Compose file".format(
                    service=service, secret=secret.source
                )
            )

        secrets.append({'secret': secret, 'file': secret_def.get('file')})

    return secrets
+
+
def warn_for_swarm_mode(client):
    """Warn when the engine runs in swarm mode (but not UCP), since compose
    schedules all containers on the current node only."""
    info = client.info()
    if info.get('Swarm', {}).get('LocalNodeState') == 'active':
        if info.get('ServerVersion', '').startswith('ucp'):
            # UCP does multi-node scheduling with traditional Compose files.
            return

        # log.warning: log.warn is a deprecated alias
        log.warning(
            "The Docker Engine you're using is running in swarm mode.\n\n"
            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
            "All containers will be scheduled on the current node.\n\n"
            "To deploy your application across the swarm, "
            "use `docker stack deploy`.\n"
        )
+
+
class NoSuchService(Exception):
    """Raised when a service name does not exist in the project."""

    def __init__(self, name):
        self.name = name
        self.msg = "No such service: {0}".format(name)

    def __str__(self):
        return self.msg
+
+
class ProjectError(Exception):
    """Generic failure while operating on a project (raised by up/pull)."""
    def __init__(self, msg):
        # NOTE(review): Exception.__init__ is not called with msg, so
        # str(exc) is empty; callers must read .msg instead.
        self.msg = msg
diff --git a/compose/service.py b/compose/service.py
new file mode 100644
index 00000000..1a18c665
--- /dev/null
+++ b/compose/service.py
@@ -0,0 +1,1428 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+import re
+import sys
+from collections import namedtuple
+from operator import attrgetter
+
+import enum
+import six
+from docker.errors import APIError
+from docker.errors import ImageNotFound
+from docker.errors import NotFound
+from docker.types import LogConfig
+from docker.utils.ports import build_port_bindings
+from docker.utils.ports import split_port
+from docker.utils.utils import convert_tmpfs_mounts
+
+from . import __version__
+from . import const
+from . import progress_stream
+from .config import DOCKER_CONFIG_KEYS
+from .config import merge_environment
+from .config.errors import DependencyError
+from .config.types import ServicePort
+from .config.types import VolumeSpec
+from .const import DEFAULT_TIMEOUT
+from .const import IS_WINDOWS_PLATFORM
+from .const import LABEL_CONFIG_HASH
+from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_ONE_OFF
+from .const import LABEL_PROJECT
+from .const import LABEL_SERVICE
+from .const import LABEL_VERSION
+from .const import NANOCPUS_SCALE
+from .container import Container
+from .errors import HealthCheckFailed
+from .errors import NoHealthCheckConfigured
+from .errors import OperationFailedError
+from .parallel import parallel_execute
+from .progress_stream import stream_output
+from .progress_stream import StreamOutputError
+from .utils import json_hash
+from .utils import parse_bytes
+from .utils import parse_seconds_float
+
+
+log = logging.getLogger(__name__)
+
+
# Service options that belong to the container's HostConfig (runtime host
# settings) rather than to the container Config itself.
HOST_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'cpu_count',
    'cpu_percent',
    'cpu_quota',
    'cpu_shares',
    'cpus',
    'cpuset',
    'devices',
    'dns',
    'dns_search',
    'dns_opt',
    'env_file',
    'extra_hosts',
    'group_add',
    'init',
    'ipc',
    'read_only',
    'log_driver',
    'log_opt',
    'mem_limit',
    'mem_reservation',
    'memswap_limit',
    'mem_swappiness',
    'oom_score_adj',
    'pid',
    'pids_limit',
    'privileged',
    'restart',
    'security_opt',
    'shm_size',
    'storage_opt',
    'sysctls',
    'userns_mode',
    'volumes_from',
    'volume_driver',
]

# Dependency condition identifiers (service started vs. passed its healthcheck).
CONDITION_STARTED = 'service_started'
CONDITION_HEALTHY = 'service_healthy'
+
+
class BuildError(Exception):
    """Raised when building a service's image fails."""

    def __init__(self, service, reason):
        self.service = service
        self.reason = reason
+
+
class NeedsBuildError(Exception):
    """Raised when an image is missing but building was explicitly skipped."""

    def __init__(self, service):
        self.service = service
+
+
class NoSuchImageError(Exception):
    """Raised when inspecting an image that does not exist locally."""
+
+
# Identifies one container of a service: (project, service, sequential number).
ServiceName = namedtuple('ServiceName', 'project service number')


# The convergence action ('create'/'recreate'/'start'/'noop') plus the
# containers it applies to.
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
+
+
@enum.unique
class ConvergenceStrategy(enum.Enum):
    """Enumeration for all possible convergence strategies. Values refer to
    when containers should be recreated.
    """
    changed = 1
    always = 2
    never = 3

    @property
    def allows_recreate(self):
        """True unless the strategy forbids recreating containers."""
        return self is not ConvergenceStrategy.never
+
+
# Scope selector for image removal (see Project.remove_images / Service.remove_image).
@enum.unique
class ImageType(enum.Enum):
    """Enumeration for the types of images known to compose."""
    none = 0
    local = 1
    all = 2
+
+
@enum.unique
class BuildAction(enum.Enum):
    """Enumeration for the possible build actions."""
    none = 0   # build only when the image does not exist yet
    force = 1  # always rebuild buildable services
    skip = 2   # never build; a missing image raises NeedsBuildError
+
+
class Service(object):
    """A single compose service: knows how to create, start, scale and
    converge its containers through the docker *client*.
    """
    def __init__(
        self,
        name,
        client=None,
        project='default',
        use_networking=False,
        links=None,
        volumes_from=None,
        network_mode=None,
        networks=None,
        secrets=None,
        scale=None,
        pid_mode=None,
        **options
    ):
        self.name = name
        self.client = client
        self.project = project
        self.use_networking = use_networking
        self.links = links or []
        self.volumes_from = volumes_from or []
        self.network_mode = network_mode or NetworkMode(None)
        self.pid_mode = pid_mode or PidMode(None)
        self.networks = networks or {}
        self.secrets = secrets or []
        self.scale_num = scale or 1
        # everything else from the service dict (image, command, ports, ...)
        self.options = options
+
+ def __repr__(self):
+ return '<Service: {}>'.format(self.name)
+
+ def containers(self, stopped=False, one_off=False, filters={}):
+ filters.update({'label': self.labels(one_off=one_off)})
+
+ return list(filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters=filters)]))
+
+ def get_container(self, number=1):
+ """Return a :class:`compose.container.Container` for this service. The
+ container must be active, and match `number`.
+ """
+ labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
+ for container in self.client.containers(filters={'label': labels}):
+ return Container.from_ps(self.client, container)
+
+ raise ValueError("No container found for %s_%s" % (self.name, number))
+
+ def start(self, **options):
+ containers = self.containers(stopped=True)
+ for c in containers:
+ self.start_container_if_stopped(c, **options)
+ return containers
+
+ def show_scale_warnings(self, desired_num):
+ if self.custom_container_name and desired_num > 1:
+ log.warn('The "%s" service is using the custom container name "%s". '
+ 'Docker requires each container to have a unique name. '
+ 'Remove the custom name to scale the service.'
+ % (self.name, self.custom_container_name))
+
+ if self.specifies_host_port() and desired_num > 1:
+ log.warn('The "%s" service specifies a port on the host. If multiple containers '
+ 'for this service are created on a single host, the port will clash.'
+ % self.name)
+
    def scale(self, desired_num, timeout=None):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """

        self.show_scale_warnings(desired_num)

        running_containers = self.containers(stopped=False)
        num_running = len(running_containers)

        if desired_num == num_running:
            # do nothing as we already have the desired number
            log.info('Desired container number already achieved')
            return

        if desired_num > num_running:
            all_containers = self.containers(stopped=True)

            if num_running != len(all_containers):
                # we have some stopped containers, check for divergences
                stopped_containers = [
                    c for c in all_containers if not c.is_running
                ]

                # Remove containers that have diverged
                divergent_containers = [
                    c for c in stopped_containers if self._containers_have_diverged([c])
                ]
                for c in divergent_containers:
                    c.remove()

                all_containers = list(set(all_containers) - set(divergent_containers))

            # start (and create, if needed) up to desired_num, lowest numbers first
            sorted_containers = sorted(all_containers, key=attrgetter('number'))
            self._execute_convergence_start(
                sorted_containers, desired_num, timeout, True, True
            )

        if desired_num < num_running:
            num_to_stop = num_running - desired_num

            # stop the highest-numbered containers first
            sorted_running_containers = sorted(
                running_containers,
                key=attrgetter('number'))

            self._downscale(sorted_running_containers[-num_to_stop:], timeout)
+
    def create_container(self,
                         one_off=False,
                         previous_container=None,
                         number=None,
                         quiet=False,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        # This is only necessary for `scale` and `volumes_from`
        # auto-creating containers to satisfy the dependency.
        self.ensure_image_exists()

        container_options = self._get_container_create_options(
            override_options,
            number or self._next_container_number(one_off=one_off),
            one_off=one_off,
            previous_container=previous_container,
        )

        if 'name' in container_options and not quiet:
            log.info("Creating %s" % container_options['name'])

        try:
            return Container.create(self.client, **container_options)
        except APIError as ex:
            raise OperationFailedError("Cannot create container for service %s: %s" %
                                       (self.name, ex.explanation))
+
+ def ensure_image_exists(self, do_build=BuildAction.none):
+ if self.can_be_built() and do_build == BuildAction.force:
+ self.build()
+ return
+
+ try:
+ self.image()
+ return
+ except NoSuchImageError:
+ pass
+
+ if not self.can_be_built():
+ self.pull()
+ return
+
+ if do_build == BuildAction.skip:
+ raise NeedsBuildError(self)
+
+ self.build()
+ log.warn(
+ "Image for service {} was built because it did not already exist. To "
+ "rebuild this image you must use `docker-compose build` or "
+ "`docker-compose up --build`.".format(self.name))
+
+ def image(self):
+ try:
+ return self.client.inspect_image(self.image_name)
+ except ImageNotFound:
+ raise NoSuchImageError("Image '{}' not found".format(self.image_name))
+
+ @property
+ def image_name(self):
+ return self.options.get('image', '{s.project}_{s.name}'.format(s=self))
+
    def convergence_plan(self, strategy=ConvergenceStrategy.changed):
        """Decide what to do with this service's existing containers.

        Returns a ConvergencePlan whose action is one of 'create',
        'recreate', 'start' or 'noop'.
        """
        containers = self.containers(stopped=True)

        if not containers:
            return ConvergencePlan('create', [])

        if strategy is ConvergenceStrategy.never:
            return ConvergencePlan('start', containers)

        if (
            strategy is ConvergenceStrategy.always or
            self._containers_have_diverged(containers)
        ):
            return ConvergencePlan('recreate', containers)

        stopped = [c for c in containers if not c.is_running]

        if stopped:
            return ConvergencePlan('start', stopped)

        return ConvergencePlan('noop', containers)
+
    def _containers_have_diverged(self, containers):
        """True when any container's recorded config hash differs from the
        service's current config hash (or the hash cannot be computed)."""
        config_hash = None

        try:
            config_hash = self.config_hash
        except NoSuchImageError as e:
            log.debug(
                'Service %s has diverged: %s',
                self.name, six.text_type(e),
            )
            return True

        has_diverged = False

        for c in containers:
            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
            if container_config_hash != config_hash:
                log.debug(
                    '%s has diverged: %s != %s',
                    c.name, container_config_hash, config_hash,
                )
                has_diverged = True

        return has_diverged
+
def _execute_convergence_create(self, scale, detached, start):
    """Create `scale` new containers in parallel, numbering them after
    the highest existing container number.

    Raises OperationFailedError on the first recorded failure.
    """
    i = self._next_container_number()

    def create_and_start(service, n):
        container = service.create_container(number=n)
        if not detached:
            container.attach_log_stream()
        if start:
            self.start_container(container)
        return container

    containers, errors = parallel_execute(
        range(i, i + scale),
        lambda n: create_and_start(self, n),
        lambda n: self.get_container_name(n),
        "Creating",
    )
    for error in errors.values():
        raise OperationFailedError(error)

    return containers
+
def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
    """Recreate existing containers in parallel, downscaling or creating
    extra containers first/afterwards to match `scale` (None = keep)."""
    if scale is not None and len(containers) > scale:
        self._downscale(containers[scale:], timeout)
        containers = containers[:scale]

    def recreate(container):
        return self.recreate_container(
            container, timeout=timeout, attach_logs=not detached,
            start_new_container=start
        )
    containers, errors = parallel_execute(
        containers,
        recreate,
        lambda c: c.name,
        "Recreating",
    )
    for error in errors.values():
        raise OperationFailedError(error)

    if scale is not None and len(containers) < scale:
        containers.extend(self._execute_convergence_create(
            scale - len(containers), detached, start
        ))
    return containers
+
def _execute_convergence_start(self, containers, scale, timeout, detached, start):
    """Start stopped containers (when `start`), adjusting the container
    count to `scale` by downscaling or creating as needed."""
    if scale is not None and len(containers) > scale:
        self._downscale(containers[scale:], timeout)
        containers = containers[:scale]
    if start:
        _, errors = parallel_execute(
            containers,
            lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
            lambda c: c.name,
            "Starting",
        )

        for error in errors.values():
            raise OperationFailedError(error)

    if scale is not None and len(containers) < scale:
        containers.extend(self._execute_convergence_create(
            scale - len(containers), detached, start
        ))
    return containers
+
def _downscale(self, containers, timeout=None):
    """Stop and remove the given surplus containers in parallel."""
    def stop_and_remove(container):
        container.stop(timeout=self.stop_timeout(timeout))
        container.remove()

    parallel_execute(
        containers,
        stop_and_remove,
        lambda c: c.name,
        "Stopping and removing",
    )
+
def execute_convergence_plan(self, plan, timeout=None, detached=False,
                             start=True, scale_override=None, rescale=True):
    """Apply a ConvergencePlan: create, recreate, start, or no-op this
    service's containers, honouring the requested scale.

    Returns the resulting list of containers.
    """
    (action, containers) = plan
    scale = scale_override if scale_override is not None else self.scale_num
    containers = sorted(containers, key=attrgetter('number'))

    self.show_scale_warnings(scale)

    if action == 'create':
        return self._execute_convergence_create(
            scale, detached, start
        )

    # The create action always needs an initial scale, but otherwise,
    # we set scale to none in no-rescale scenarios (`run` dependencies)
    if not rescale:
        scale = None

    if action == 'recreate':
        return self._execute_convergence_recreate(
            containers, scale, timeout, detached, start
        )

    if action == 'start':
        return self._execute_convergence_start(
            containers, scale, timeout, detached, start
        )

    if action == 'noop':
        if scale != len(containers):
            return self._execute_convergence_start(
                containers, scale, timeout, detached, start
            )
        for c in containers:
            log.info("%s is up-to-date" % c.name)

        return containers

    raise Exception("Invalid action: {}".format(action))
+
def recreate_container(
        self,
        container,
        timeout=None,
        attach_logs=False,
        start_new_container=True):
    """Recreate a container.

    The original container is renamed to a temporary name so that data
    volumes can be copied to the new container, before the original
    container is removed.

    Returns the new container.
    """
    log.info("Recreating %s" % container.name)

    container.stop(timeout=self.stop_timeout(timeout))
    container.rename_to_tmp_name()
    # Keep the same container number so names stay stable across recreate.
    new_container = self.create_container(
        previous_container=container,
        number=container.labels.get(LABEL_CONTAINER_NUMBER),
        quiet=True,
    )
    if attach_logs:
        new_container.attach_log_stream()
    if start_new_container:
        self.start_container(new_container)
    container.remove()
    return new_container
+
def stop_timeout(self, timeout):
    """Effective stop timeout: explicit value, else the service's
    stop_grace_period option, else DEFAULT_TIMEOUT."""
    if timeout is not None:
        return timeout
    grace = parse_seconds_float(self.options.get('stop_grace_period'))
    return grace if grace is not None else DEFAULT_TIMEOUT
+
def start_container_if_stopped(self, container, attach_logs=False, quiet=False):
    """Start `container` unless it is already running.

    Returns the container when it was started; returns None otherwise.
    """
    if not container.is_running:
        if not quiet:
            log.info("Starting %s" % container.name)
        if attach_logs:
            container.attach_log_stream()
        return self.start_container(container)
+
def start_container(self, container):
    """Connect the container to its networks and start it.

    Raises OperationFailedError when the daemon refuses to start it.
    """
    self.connect_container_to_networks(container)
    try:
        container.start()
    except APIError as ex:
        raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
    return container
+
def connect_container_to_networks(self, container):
    """Attach the container to each configured network with its aliases,
    static addresses and links, reconnecting if it was already attached
    without the short-id alias."""
    connected_networks = container.get('NetworkSettings.Networks')

    for network, netdefs in self.networks.items():
        if network in connected_networks:
            if short_id_alias_exists(container, network):
                continue

            # Reconnect so the short-id alias gets registered.
            self.client.disconnect_container_from_network(
                container.id,
                network)

        self.client.connect_container_to_network(
            container.id, network,
            aliases=self._get_aliases(netdefs, container),
            ipv4_address=netdefs.get('ipv4_address', None),
            ipv6_address=netdefs.get('ipv6_address', None),
            links=self._get_links(False),
            link_local_ips=netdefs.get('link_local_ips', None),
        )
+
def remove_duplicate_containers(self, timeout=None):
    """Stop and remove containers that duplicate another's number."""
    for c in self.duplicate_containers():
        log.info('Removing %s' % c.name)
        c.stop(timeout=self.stop_timeout(timeout))
        c.remove()
+
def duplicate_containers(self):
    """Yield containers whose number duplicates an earlier-created one."""
    seen_numbers = set()
    by_creation = sorted(
        self.containers(stopped=True),
        key=lambda c: c.get('Created'),
    )
    for container in by_creation:
        if container.number in seen_numbers:
            yield container
        else:
            seen_numbers.add(container.number)
+
@property
def config_hash(self):
    """Stable hash of the service's effective configuration, stored on
    containers (LABEL_CONFIG_HASH) to detect divergence."""
    return json_hash(self.config_dict())
+
def config_dict(self):
    """Configuration fingerprint used for the config hash: options,
    resolved image id, links, network/pid identifiers and volumes_from
    service sources."""
    return {
        'options': self.options,
        'image_id': self.image()['Id'],
        'links': self.get_link_names(),
        'net': self.network_mode.id,
        'networks': self.networks,
        'volumes_from': [
            (v.source.name, v.mode)
            for v in self.volumes_from if isinstance(v.source, Service)
        ],
    }
+
def get_dependency_names(self):
    """Names of services this one depends on: links, volumes_from
    sources, network/pid providers, and depends_on entries."""
    net_name = self.network_mode.service_name
    pid_namespace = self.pid_mode.service_name
    return (
        self.get_linked_service_names() +
        self.get_volumes_from_names() +
        ([net_name] if net_name else []) +
        ([pid_namespace] if pid_namespace else []) +
        list(self.options.get('depends_on', {}).keys())
    )
+
def get_dependency_configs(self):
    """Map each dependency service name to a readiness check.

    Linked services, volumes_from sources and the network/pid providers
    have no condition (None). Each depends_on entry gets a predicate
    called with the dependency service: always-true for
    condition: service_started, a healthcheck probe for
    condition: service_healthy.

    Raises ValueError for any other condition value (the config schema
    normally prevents this, but it can be bypassed when Compose is
    called programmatically).
    """
    net_name = self.network_mode.service_name
    pid_namespace = self.pid_mode.service_name

    configs = dict.fromkeys(self.get_linked_service_names())
    configs.update(dict.fromkeys(self.get_volumes_from_names()))
    if net_name:
        configs[net_name] = None
    if pid_namespace:
        configs[pid_namespace] = None

    # Note: no need to pre-populate configs with the raw depends_on
    # values (as the previous version did) -- every entry is assigned a
    # predicate (or raises) in the loop below.
    for svc, config in self.options.get('depends_on', {}).items():
        if config['condition'] == CONDITION_STARTED:
            configs[svc] = lambda s: True
        elif config['condition'] == CONDITION_HEALTHY:
            configs[svc] = lambda s: s.is_healthy()
        else:
            # The config schema already prevents this, but it might be
            # bypassed if Compose is called programmatically.
            raise ValueError(
                'depends_on condition "{}" is invalid.'.format(
                    config['condition']
                )
            )

    return configs
+
def get_linked_service_names(self):
    """Names of the services this service links to."""
    return [service.name for (service, _) in self.links]
+
def get_link_names(self):
    """(service_name, alias) pairs for each configured link."""
    return [(service.name, alias) for service, alias in self.links]
+
def get_volumes_from_names(self):
    """Names of services whose volumes this service mounts."""
    return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]
+
+ # TODO: this would benefit from github.com/docker/docker/pull/14699
+ # to remove the need to inspect every container
def _next_container_number(self, one_off=False):
    """Next free container number (1-based) among this service's
    containers, inspecting all (including stopped) matching containers."""
    containers = filter(None, [
        Container.from_ps(self.client, container)
        for container in self.client.containers(
            all=True,
            filters={'label': self.labels(one_off=one_off)})
    ])
    numbers = [c.number for c in containers]
    return 1 if not numbers else max(numbers) + 1
+
def _get_aliases(self, network, container=None):
    """Network aliases for a container: the service name, the container
    short id, and any aliases from the network config. One-off
    containers get no aliases."""
    if container and container.labels.get(LABEL_ONE_OFF) == "True":
        return []

    return list(
        {self.name} |
        ({container.short_id} if container else set()) |
        set(network.get('aliases', ()))
    )
+
def build_default_networking_config(self):
    """Endpoint config for the service's primary network (aliases and
    any static IPv4/IPv6 addresses); empty when no networks configured."""
    if not self.networks:
        return {}

    network = self.networks[self.network_mode.id]
    endpoint = {
        'Aliases': self._get_aliases(network),
        'IPAMConfig': {},
    }

    if network.get('ipv4_address'):
        endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address')
    if network.get('ipv6_address'):
        endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address')

    return {"EndpointsConfig": {self.network_mode.id: endpoint}}
+
def _get_links(self, link_to_self):
    """(alias, container_name) pairs for linked services, optionally
    this service's own containers, and external_links entries."""
    links = {}

    for service, link_name in self.links:
        for container in service.containers():
            links[link_name or service.name] = container.name
            links[container.name] = container.name
            links[container.name_without_project] = container.name

    if link_to_self:
        for container in self.containers():
            links[self.name] = container.name
            links[container.name] = container.name
            links[container.name_without_project] = container.name

    for external_link in self.options.get('external_links') or []:
        if ':' not in external_link:
            link_name = external_link
        else:
            external_link, link_name = external_link.split(':')
        links[link_name] = external_link

    # Invert to the (alias, container_name) shape docker-py expects.
    return [
        (alias, container_name)
        for (container_name, alias) in links.items()
    ]
+
def _get_volumes_from(self):
    """Formatted 'container_id:mode' strings for each volumes_from spec."""
    return [build_volume_from(spec) for spec in self.volumes_from]
+
def _get_container_create_options(
        self,
        override_options,
        number,
        one_off=False,
        previous_container=None):
    """Assemble the keyword arguments for client.create_container.

    Merges service options with override_options, resolves the name,
    ports, volumes, environment, labels and host config. Note that
    override_options is mutated ('volumes' popped, 'binds' added).
    """
    # Only label a config hash for plain, unmodified containers.
    add_config_hash = (not one_off and not override_options)

    container_options = dict(
        (k, self.options[k])
        for k in DOCKER_CONFIG_KEYS if k in self.options)
    override_volumes = override_options.pop('volumes', [])
    container_options.update(override_options)

    if not container_options.get('name'):
        container_options['name'] = self.get_container_name(number, one_off)

    container_options.setdefault('detach', True)

    # If a qualified hostname was given, split it into an
    # unqualified hostname and a domainname unless domainname
    # was also given explicitly. This matches the behavior of
    # the official Docker CLI in that scenario.
    if ('hostname' in container_options and
            'domainname' not in container_options and
            '.' in container_options['hostname']):
        parts = container_options['hostname'].partition('.')
        container_options['hostname'] = parts[0]
        container_options['domainname'] = parts[2]

    if 'ports' in container_options or 'expose' in self.options:
        container_options['ports'] = build_container_ports(
            formatted_ports(container_options.get('ports', [])),
            self.options)

    if 'volumes' in container_options or override_volumes:
        container_options['volumes'] = list(set(
            container_options.get('volumes', []) + override_volumes
        ))

    container_options['environment'] = merge_environment(
        self.options.get('environment'),
        override_options.get('environment'))

    # Host binds go in the HostConfig; the affinity hint keeps data
    # volumes on the same node as the previous container (Swarm classic).
    binds, affinity = merge_volume_bindings(
        container_options.get('volumes') or [],
        self.options.get('tmpfs') or [],
        previous_container)
    override_options['binds'] = binds
    container_options['environment'].update(affinity)

    container_options['volumes'] = dict(
        (v.internal, {}) for v in container_options.get('volumes') or {})

    secret_volumes = self.get_secret_volumes()
    if secret_volumes:
        override_options['binds'].extend(v.repr() for v in secret_volumes)
        container_options['volumes'].update(
            (v.internal, {}) for v in secret_volumes)

    container_options['image'] = self.image_name

    container_options['labels'] = build_container_labels(
        container_options.get('labels', {}),
        self.labels(one_off=one_off),
        number,
        self.config_hash if add_config_hash else None)

    # Delete options which are only used in HostConfig
    for key in HOST_CONFIG_KEYS:
        container_options.pop(key, None)

    container_options['host_config'] = self._get_container_host_config(
        override_options,
        one_off=one_off)

    networking_config = self.build_default_networking_config()
    if networking_config:
        container_options['networking_config'] = networking_config

    container_options['environment'] = format_environment(
        container_options['environment'])
    return container_options
+
def _get_container_host_config(self, override_options, one_off=False):
    """Build the HostConfig dict via client.create_host_config from the
    merged service + override options."""
    options = dict(self.options, **override_options)

    logging_dict = options.get('logging', None)
    blkio_config = convert_blkio_config(options.get('blkio_config', None))
    log_config = get_log_config(logging_dict)
    # 'init' may be a path to a custom init binary instead of a bool.
    init_path = None
    if isinstance(options.get('init'), six.string_types):
        init_path = options.get('init')
        options['init'] = True

    # The API takes CPU count in units of 1e-9 CPUs.
    nano_cpus = None
    if 'cpus' in options:
        nano_cpus = int(options.get('cpus') * NANOCPUS_SCALE)

    return self.client.create_host_config(
        links=self._get_links(link_to_self=one_off),
        port_bindings=build_port_bindings(
            formatted_ports(options.get('ports', []))
        ),
        binds=options.get('binds'),
        volumes_from=self._get_volumes_from(),
        privileged=options.get('privileged', False),
        network_mode=self.network_mode.mode,
        devices=options.get('devices'),
        dns=options.get('dns'),
        dns_opt=options.get('dns_opt'),
        dns_search=options.get('dns_search'),
        restart_policy=options.get('restart'),
        cap_add=options.get('cap_add'),
        cap_drop=options.get('cap_drop'),
        mem_limit=options.get('mem_limit'),
        mem_reservation=options.get('mem_reservation'),
        memswap_limit=options.get('memswap_limit'),
        ulimits=build_ulimits(options.get('ulimits')),
        log_config=log_config,
        extra_hosts=options.get('extra_hosts'),
        read_only=options.get('read_only'),
        pid_mode=self.pid_mode.mode,
        security_opt=options.get('security_opt'),
        ipc_mode=options.get('ipc'),
        cgroup_parent=options.get('cgroup_parent'),
        cpu_quota=options.get('cpu_quota'),
        shm_size=options.get('shm_size'),
        sysctls=options.get('sysctls'),
        pids_limit=options.get('pids_limit'),
        tmpfs=options.get('tmpfs'),
        oom_score_adj=options.get('oom_score_adj'),
        mem_swappiness=options.get('mem_swappiness'),
        group_add=options.get('group_add'),
        userns_mode=options.get('userns_mode'),
        init=options.get('init', None),
        init_path=init_path,
        isolation=options.get('isolation'),
        cpu_count=options.get('cpu_count'),
        cpu_percent=options.get('cpu_percent'),
        nano_cpus=nano_cpus,
        volume_driver=options.get('volume_driver'),
        cpuset_cpus=options.get('cpuset'),
        cpu_shares=options.get('cpu_shares'),
        storage_opt=options.get('storage_opt'),
        blkio_weight=blkio_config.get('weight'),
        blkio_weight_device=blkio_config.get('weight_device'),
        device_read_bps=blkio_config.get('device_read_bps'),
        device_read_iops=blkio_config.get('device_read_iops'),
        device_write_bps=blkio_config.get('device_write_bps'),
        device_write_iops=blkio_config.get('device_write_iops'),
    )
+
def get_secret_volumes(self):
    """Read-only VolumeSpecs mounting each secret file at its target
    (relative targets are placed under const.SECRETS_PATH)."""
    def build_spec(secret):
        target = secret['secret'].target
        if target is None:
            target = '{}/{}'.format(const.SECRETS_PATH, secret['secret'].source)
        elif not os.path.isabs(target):
            target = '{}/{}'.format(const.SECRETS_PATH, target)

        return VolumeSpec(secret['file'], target, 'ro')

    return [build_spec(secret) for secret in self.secrets]
+
def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
    """Build the service's image and return the new image id.

    Streams the daemon's build output to stdout. Raises BuildError on
    stream errors or when no image id can be found in the output.
    """
    log.info('Building %s' % self.name)

    build_opts = self.options.get('build', {})

    build_args = build_opts.get('args', {}).copy()
    if build_args_override:
        build_args.update(build_args_override)

    # python2 os.stat() doesn't support unicode on some UNIX, so we
    # encode it to a bytestring to be safe
    path = build_opts.get('context')
    if not six.PY3 and not IS_WINDOWS_PLATFORM:
        path = path.encode('utf8')

    build_output = self.client.build(
        path=path,
        tag=self.image_name,
        stream=True,
        rm=True,
        forcerm=force_rm,
        pull=pull,
        nocache=no_cache,
        dockerfile=build_opts.get('dockerfile', None),
        cache_from=build_opts.get('cache_from', None),
        labels=build_opts.get('labels', None),
        buildargs=build_args,
        network_mode=build_opts.get('network', None),
        target=build_opts.get('target', None),
        shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
    )

    try:
        all_events = stream_output(build_output, sys.stdout)
    except StreamOutputError as e:
        raise BuildError(self, six.text_type(e))

    # Ensure the HTTP connection is not reused for another
    # streaming command, as the Docker daemon can sometimes
    # complain about it
    self.client.close()

    # The image id is reported in a 'Successfully built <id>' line.
    image_id = None

    for event in all_events:
        if 'stream' in event:
            match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
            if match:
                image_id = match.group(1)

    if image_id is None:
        raise BuildError(self, event if all_events else 'Unknown')

    return image_id
+
def can_be_built(self):
    """True when the service declares a build section."""
    return 'build' in self.options
+
def labels(self, one_off=False):
    """'key=value' label strings identifying this service's containers."""
    return [
        '{0}={1}'.format(LABEL_PROJECT, self.project),
        '{0}={1}'.format(LABEL_SERVICE, self.name),
        '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
    ]
+
@property
def custom_container_name(self):
    """The user-specified container_name option, or None."""
    return self.options.get('container_name')
+
def get_container_name(self, number, one_off=False):
    """Name for container `number`: the custom container_name (except
    for one-offs) or the generated project_service[_run]_number name.

    Raises DependencyError when the name collides with one of the
    service's own external_links.
    """
    if self.custom_container_name and not one_off:
        return self.custom_container_name

    container_name = build_container_name(
        self.project, self.name, number, one_off,
    )
    ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
    if container_name in ext_links_origins:
        raise DependencyError(
            'Service {0} has a self-referential external link: {1}'.format(
                self.name, container_name
            )
        )
    return container_name
+
def remove_image(self, image_type):
    """Remove the service's image per ImageType policy.

    Returns True on removal; False when skipped (none, or 'local' with
    an externally-named image) or when the daemon call fails.
    """
    if not image_type or image_type == ImageType.none:
        return False
    if image_type == ImageType.local and self.options.get('image'):
        return False

    log.info("Removing image %s", self.image_name)
    try:
        self.client.remove_image(self.image_name)
        return True
    except APIError as e:
        log.error("Failed to remove image for service %s: %s", self.name, e)
        return False
+
def specifies_host_port(self):
    """True when any port mapping pins a specific host port."""
    def has_host_port(binding):
        if isinstance(binding, dict):
            external_bindings = binding.get('published')
        else:
            _, external_bindings = split_port(binding)

        # there are no external bindings
        if external_bindings is None:
            return False

        # we only need to check the first binding from the range
        external_binding = external_bindings[0]

        # non-tuple binding means there is a host port specified
        if not isinstance(external_binding, tuple):
            return True

        # extract actual host port from tuple of (host_ip, host_port)
        _, host_port = external_binding
        if host_port is not None:
            return True

        return False

    return any(has_host_port(binding) for binding in self.options.get('ports', []))
+
def pull(self, ignore_pull_failures=False, silent=False):
    """Pull the service's image (tag defaults to 'latest').

    Returns the pulled digest when available; no-op for build-only
    services. Pull failures are re-raised unless ignore_pull_failures.
    """
    if 'image' not in self.options:
        return

    repo, tag, separator = parse_repository_tag(self.options['image'])
    tag = tag or 'latest'
    if not silent:
        log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
    try:
        output = self.client.pull(repo, tag=tag, stream=True)
        if silent:
            # Discard progress output but still consume the stream to
            # obtain the digest.
            with open(os.devnull, 'w') as devnull:
                return progress_stream.get_digest_from_pull(
                    stream_output(output, devnull))
        else:
            return progress_stream.get_digest_from_pull(
                stream_output(output, sys.stdout))
    except (StreamOutputError, NotFound) as e:
        if not ignore_pull_failures:
            raise
        else:
            log.error(six.text_type(e))
+
def push(self, ignore_push_failures=False):
    """Push the service's image; only services with both an image name
    and a build section are pushed.

    Returns the pushed digest when available. Push failures are
    re-raised unless ignore_push_failures.
    """
    if 'image' not in self.options or 'build' not in self.options:
        return

    repo, tag, separator = parse_repository_tag(self.options['image'])
    tag = tag or 'latest'
    log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
    output = self.client.push(repo, tag=tag, stream=True)

    try:
        return progress_stream.get_digest_from_push(
            stream_output(output, sys.stdout))
    except StreamOutputError as e:
        if not ignore_push_failures:
            raise
        else:
            log.error(six.text_type(e))
+
def is_healthy(self):
    """Check that all containers for this service report healthy.

    Returns False if at least one healthcheck is pending ('starting').
    Raises HealthCheckFailed for an unhealthy container, and
    NoHealthCheckConfigured when a container has no health status.
    """
    result = True
    for ctnr in self.containers():
        # Re-inspect for up-to-date health state.
        ctnr.inspect()
        status = ctnr.get('State.Health.Status')
        if status is None:
            raise NoHealthCheckConfigured(self.name)
        elif status == 'starting':
            result = False
        elif status == 'unhealthy':
            raise HealthCheckFailed(ctnr.short_id)
    return result
+
+
def short_id_alias_exists(container, network):
    """True if the container's short id is already registered as an
    alias on `network`."""
    key = 'NetworkSettings.Networks.{net}.Aliases'.format(net=network)
    aliases = container.get(key) or ()
    return container.short_id in aliases
+
+
class PidMode(object):
    """A plain PID mode (e.g. 'host'), passed through unchanged."""

    def __init__(self, mode):
        self._mode = mode

    @property
    def mode(self):
        # The value handed to the daemon's HostConfig.
        return self._mode

    @property
    def service_name(self):
        # Not backed by a service, so there is no dependency to wait on.
        return None
+
+
class ServicePidMode(PidMode):
    """A PID mode that shares the PID namespace of another service's
    container (the service is a startup dependency)."""

    def __init__(self, service):
        self.service = service

    @property
    def service_name(self):
        return self.service.name

    @property
    def mode(self):
        containers = self.service.containers()
        if containers:
            return 'container:' + containers[0].id

        # Fix: message previously read "trying to use reuse the".
        log.warn(
            "Service %s is trying to reuse the PID namespace "
            "of another service that is not running." % (self.service_name)
        )
        return None
+
+
class ContainerPidMode(PidMode):
    """A PID mode that shares a specific container's PID namespace."""

    def __init__(self, container):
        self.container = container
        self._mode = 'container:{}'.format(container.id)
+
+
class NetworkMode(object):
    """A `standard` network mode (ex: host, bridge)"""

    # Not backed by a service, so there is no dependency to wait on.
    service_name = None

    def __init__(self, network_mode):
        self.network_mode = network_mode

    @property
    def id(self):
        return self.network_mode

    # For standard modes the HostConfig value equals the id.
    mode = id
+
+
class ContainerNetworkMode(object):
    """A network mode that borrows an existing container's network stack."""

    # Not backed by a service, so there is no dependency to wait on.
    service_name = None

    def __init__(self, container):
        self.container = container

    @property
    def id(self):
        return self.container.id

    @property
    def mode(self):
        return 'container:{}'.format(self.container.id)
+
+
class ServiceNetworkMode(object):
    """A network mode that uses a service's network stack (the service
    becomes a startup dependency)."""

    def __init__(self, service):
        self.service = service

    @property
    def id(self):
        return self.service.name

    service_name = id

    @property
    def mode(self):
        containers = self.service.containers()
        if containers:
            return 'container:' + containers[0].id

        # Fix: message previously read "trying to use reuse the".
        log.warn("Service %s is trying to reuse the network stack "
                 "of another service that is not running." % (self.id))
        return None
+
+
+# Names
+
+
def build_container_name(project, service, number, one_off=False):
    """Canonical container name: <project>_<service>[_run]_<number>."""
    parts = [project, service, str(number)]
    if one_off:
        # One-off (`run`) containers carry a 'run' marker before the number.
        parts.insert(2, 'run')
    return '_'.join(parts)
+
+
+# Images
+
def parse_repository_tag(repo_path):
    """Splits image identification into base image path, tag/digest
    and its separator.

    Example:

    >>> parse_repository_tag('user/repo@sha256:digest')
    ('user/repo', 'sha256:digest', '@')
    >>> parse_repository_tag('user/repo:v1')
    ('user/repo', 'v1', ':')
    """
    tag_separator = ":"
    digest_separator = "@"

    if digest_separator in repo_path:
        repo, tag = repo_path.rsplit(digest_separator, 1)
        return repo, tag, digest_separator

    repo, tag = repo_path, ""
    if tag_separator in repo_path:
        repo, tag = repo_path.rsplit(tag_separator, 1)
        # A '/' after the last ':' means it was a registry port, not a tag.
        if "/" in tag:
            repo, tag = repo_path, ""

    return repo, tag, tag_separator
+
+
+# Volumes
+
+
def merge_volume_bindings(volumes, tmpfs, previous_container):
    """Return a list of volume bindings for a container. Container data volumes
    are replaced by those from the previous container.

    Returns (bindings, affinity) where affinity is an environment hint
    pinning the container next to its predecessor when volumes carry over.
    """
    affinity = {}

    volume_bindings = dict(
        build_volume_binding(volume)
        for volume in volumes
        if volume.external)

    if previous_container:
        old_volumes = get_container_data_volumes(previous_container, volumes, tmpfs)
        warn_on_masked_volume(volumes, old_volumes, previous_container.service)
        volume_bindings.update(
            build_volume_binding(volume) for volume in old_volumes)

        if old_volumes:
            affinity = {'affinity:container': '=' + previous_container.id}

    return list(volume_bindings.values()), affinity
+
+
def get_container_data_volumes(container, volumes_option, tmpfs_option):
    """Find the container data volumes that are in `volumes_option`, and return
    a mapping of volume bindings for those volumes.
    """
    volumes = []
    volumes_option = volumes_option or []

    container_mounts = dict(
        (mount['Destination'], mount)
        for mount in container.get('Mounts') or {}
    )

    # Volumes declared by the image itself also carry over.
    image_volumes = [
        VolumeSpec.parse(volume)
        for volume in
        container.image_config['ContainerConfig'].get('Volumes') or {}
    ]

    for volume in set(volumes_option + image_volumes):
        # No need to preserve host volumes
        if volume.external:
            continue

        # Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751
        if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys():
            continue

        mount = container_mounts.get(volume.internal)

        # New volume, doesn't exist in the old container
        if not mount:
            continue

        # Volume was previously a host volume, now it's a container volume
        if not mount.get('Name'):
            continue

        # Copy existing volume from old container
        volume = volume._replace(external=mount['Name'])
        volumes.append(volume)

    return volumes
+
+
def warn_on_masked_volume(volumes_option, container_volumes, service):
    """Warn when a host mapping is masked by a data volume carried over
    from the previous container."""
    container_volumes = dict(
        (volume.internal, volume.external)
        for volume in container_volumes)

    for volume in volumes_option:
        if (
            volume.external and
            volume.internal in container_volumes and
            container_volumes.get(volume.internal) != volume.external
        ):
            log.warn((
                "Service \"{service}\" is using volume \"{volume}\" from the "
                "previous container. Host mapping \"{host_path}\" has no effect. "
                "Remove the existing containers (with `docker-compose rm {service}`) "
                "to use the host volume mapping."
            ).format(
                service=service,
                volume=volume.internal,
                host_path=volume.external))
+
+
def build_volume_binding(volume_spec):
    """(container_path, binding_string) pair for a VolumeSpec."""
    return volume_spec.internal, volume_spec.repr()
+
+
def build_volume_from(volume_from_spec):
    """
    volume_from can be either a service or a container. We want to return the
    container.id and format it into a string complete with the mode.
    """
    if isinstance(volume_from_spec.source, Service):
        containers = volume_from_spec.source.containers(stopped=True)
        if not containers:
            # No existing container to borrow from; create one on the fly.
            return "{}:{}".format(
                volume_from_spec.source.create_container().id,
                volume_from_spec.mode)

        container = containers[0]
        return "{}:{}".format(container.id, volume_from_spec.mode)
    elif isinstance(volume_from_spec.source, Container):
        return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
+
+
+# Labels
+
+
def build_container_labels(label_options, service_labels, number, config_hash):
    """Merge user labels, service identity labels, the container number
    and (optionally) the config hash into one label dict."""
    labels = dict(label_options or {})
    labels.update(label.split('=', 1) for label in service_labels)
    labels[LABEL_CONTAINER_NUMBER] = str(number)
    labels[LABEL_VERSION] = __version__

    if config_hash:
        log.debug("Added config hash: %s" % config_hash)
        labels[LABEL_CONFIG_HASH] = config_hash

    return labels
+
+
+# Ulimits
+
+
def build_ulimits(ulimit_config):
    """Convert the ulimits option into the list-of-dicts form docker-py
    expects; a bare integer sets both soft and hard limits."""
    if not ulimit_config:
        return None
    ulimits = []
    for limit_name, soft_hard_values in six.iteritems(ulimit_config):
        if isinstance(soft_hard_values, six.integer_types):
            ulimits.append({'name': limit_name, 'soft': soft_hard_values, 'hard': soft_hard_values})
        elif isinstance(soft_hard_values, dict):
            ulimit_dict = {'name': limit_name}
            ulimit_dict.update(soft_hard_values)
            ulimits.append(ulimit_dict)

    return ulimits
+
+
def get_log_config(logging_dict):
    """Build a docker-py LogConfig from a service's `logging` options."""
    if logging_dict:
        driver = logging_dict.get('driver', "")
        options = logging_dict.get('options', None)
    else:
        driver, options = "", None
    return LogConfig(type=driver, config=options)
+
+
+# TODO: remove once fix is available in docker-py
+def format_environment(environment):
+ def format_env(key, value):
+ if value is None:
+ return key
+ if isinstance(value, six.binary_type):
+ value = value.decode('utf-8')
+ return '{key}={value}'.format(key=key, value=value)
+ return [format_env(*item) for item in environment.items()]
+
+
+# Ports
+def formatted_ports(ports):
+ result = []
+ for port in ports:
+ if isinstance(port, ServicePort):
+ result.append(port.legacy_repr())
+ else:
+ result.append(port)
+ return result
+
+
def build_container_ports(container_ports, options):
    """Expand port mappings plus 'expose' entries into the internal port
    list docker-py expects; 'port/proto' becomes a (port, proto) tuple."""
    ports = []
    all_ports = container_ports + options.get('expose', [])
    for port_range in all_ports:
        internal_range, _ = split_port(port_range)
        for port in internal_range:
            port = str(port)
            if '/' in port:
                port = tuple(port.split('/'))
            ports.append(port)
    return ports
+
+
def convert_blkio_config(blkio_config):
    """Translate compose-file blkio settings into the host-config form.

    Device entries get their keys capitalized (path -> Path, rate ->
    Rate) as the API expects; 'weight' is always carried through
    (possibly None) when a config dict is given.
    """
    if blkio_config is None:
        return {}

    result = {'weight': blkio_config.get('weight')}
    device_fields = (
        "device_read_bps", "device_read_iops", "device_write_bps",
        "device_write_iops", "weight_device",
    )
    for field in device_fields:
        if field in blkio_config:
            result[field] = [
                dict((key.capitalize(), value) for key, value in entry.items())
                for entry in blkio_config[field]
            ]
    return result
diff --git a/compose/state.py b/compose/state.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/compose/state.py
diff --git a/compose/timeparse.py b/compose/timeparse.py
new file mode 100644
index 00000000..16ef8a6d
--- /dev/null
+++ b/compose/timeparse.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+timeparse.py
+(c) Will Roberts <wildwilhelm@gmail.com> 1 February, 2014
+
+This is a vendored and modified copy of:
+github.com/wroberts/pytimeparse @ cc0550d
+
+It has been modified to mimic the behaviour of
+https://golang.org/pkg/time/#ParseDuration
+'''
+# MIT LICENSE
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import re
+
HOURS = r'(?P<hours>[\d.]+)h'
MINS = r'(?P<mins>[\d.]+)m'
SECS = r'(?P<secs>[\d.]+)s'
MILLI = r'(?P<milli>[\d.]+)ms'
MICRO = r'(?P<micro>[\d.]+)(?:us|µs)'
NANO = r'(?P<nano>[\d.]+)ns'


def opt(x):
    """Wrap a regex fragment so the corresponding unit is optional."""
    return r'(?:{x})?'.format(x=x)


# Go-style duration: any subset of the units, largest unit first,
# e.g. '1h30m5s' (mirrors https://golang.org/pkg/time/#ParseDuration).
TIMEFORMAT = r'{HOURS}{MINS}{SECS}{MILLI}{MICRO}{NANO}'.format(
    HOURS=opt(HOURS),
    MINS=opt(MINS),
    SECS=opt(SECS),
    MILLI=opt(MILLI),
    MICRO=opt(MICRO),
    NANO=opt(NANO),
)

# Seconds per unit, keyed by the group names used in TIMEFORMAT.
MULTIPLIERS = dict([
    ('hours', 60 * 60),
    ('mins', 60),
    ('secs', 1),
    ('milli', 1.0 / 1000),
    ('micro', 1.0 / 1000.0 / 1000),
    ('nano', 1.0 / 1000.0 / 1000.0 / 1000.0),
])


def timeparse(sval):
    """Parse a time expression, returning it as a number of seconds. If
    possible, the return value will be an `int`; if this is not
    possible, the return will be a `float`. Returns `None` if a time
    expression cannot be parsed from the given string.

    Only Go-style duration units are accepted (h, m, s, ms, us/µs, ns);
    spelled-out units such as 'minutes' are NOT supported by this
    modified copy, unlike upstream pytimeparse.

    Arguments:
    - `sval`: the string value to parse

    >>> timeparse('1m24s')
    84
    >>> timeparse('1.5m')
    90.0
    >>> timeparse('1.2s')
    1.2
    """
    match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
    # The all-optional pattern matches the empty string, so an empty
    # overall match must be treated as a parse failure.
    if not match or not match.group(0).strip():
        return

    mdict = match.groupdict()
    return sum(
        MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None)


def cast(value):
    """Convert a matched number: int when all digits, float otherwise."""
    return int(value, 10) if value.isdigit() else float(value)
diff --git a/compose/utils.py b/compose/utils.py
new file mode 100644
index 00000000..197ae6eb
--- /dev/null
+++ b/compose/utils.py
@@ -0,0 +1,145 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import codecs
+import hashlib
+import json
+import json.decoder
+import logging
+import ntpath
+
+import six
+from docker.errors import DockerException
+from docker.utils import parse_bytes as sdk_parse_bytes
+
+from .errors import StreamParseError
+from .timeparse import MULTIPLIERS
+from .timeparse import timeparse
+
+
# Shared JSON decoder; its raw_decode() lets json_splitter pull a single
# object off the front of a buffer without the remainder being valid JSON.
json_decoder = json.JSONDecoder()
# Module-level logger, per the project's logging convention.
log = logging.getLogger(__name__)
+
+
def get_output_stream(stream):
    """Return *stream* ready for unicode text output.

    Python 3 text streams accept unicode directly; on Python 2 the raw
    stream is wrapped with a utf-8 incremental writer.
    """
    if not six.PY3:
        return codecs.getwriter('utf-8')(stream)
    return stream
+
+
def stream_as_text(stream):
    """Yield every item of *stream* as text.

    Byte items are decoded as UTF-8 with errors='replace'; text items
    pass through untouched.

    This function can be removed once docker-py returns text streams
    instead of byte streams.
    """
    for chunk in stream:
        if isinstance(chunk, six.text_type):
            yield chunk
        else:
            yield chunk.decode('utf-8', 'replace')
+
+
def line_splitter(buffer, separator=u'\n'):
    """Pop one separator-terminated line off the front of *buffer*.

    Returns (line_including_separator, remainder), or None when the
    separator does not occur in the buffer yet.
    """
    cut = buffer.find(six.text_type(separator))
    if cut < 0:
        return None
    return buffer[:cut + 1], buffer[cut + 1:]
+
+
def split_buffer(stream, splitter=None, decoder=lambda a: a):
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.

    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.

    :param stream: iterable of text (bytes items are decoded first)
    :param splitter: callable(buffer) -> (chunk, rest) or None when no
        complete chunk is buffered yet; defaults to line_splitter
    :param decoder: applied only to the final unterminated chunk
    :raises StreamParseError: if the decoder fails on that final chunk
    """
    splitter = splitter or line_splitter
    buffered = six.text_type('')

    for data in stream_as_text(stream):
        buffered += data
        # Emit every complete chunk currently sitting in the buffer.
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                # No full chunk yet; go read more input.
                break

            item, buffered = buffer_split
            yield item

    # Whatever remains had no trailing separator; decode it as a last item.
    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            log.error(
                'Compose tried decoding the following data chunk, but failed:'
                '\n%s' % repr(buffered)
            )
            raise StreamParseError(e)
+
+
def json_splitter(buffer):
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    stripped = buffer.strip()
    try:
        obj, end = json_decoder.raw_decode(stripped)
    except ValueError:
        # No complete JSON object at the front of the buffer (yet).
        return None
    remainder = stripped[json.decoder.WHITESPACE.match(stripped, end).end():]
    return obj, remainder
+
+
def json_stream(stream):
    """Given a stream of text, return a stream of json objects.

    Handles inconsistently buffered streams: some entries may be newline
    delimited while others are not.
    """
    return split_buffer(
        stream, splitter=json_splitter, decoder=json_decoder.decode)
+
+
def json_hash(obj):
    """Return a stable sha256 hex digest for a JSON-serializable object.

    Keys are sorted and separators are compact, so logically equal
    objects always hash identically.
    """
    payload = json.dumps(obj, sort_keys=True, separators=(',', ':'))
    return hashlib.sha256(payload.encode('utf8')).hexdigest()
+
+
def microseconds_from_time_nano(time_nano):
    """Return the sub-second part of a nanosecond timestamp, in microseconds."""
    sub_second_nanos = time_nano % 1000000000
    return int(sub_second_nanos / 1000)
+
+
def nanoseconds_from_time_seconds(time_seconds):
    """Convert a duration in seconds to an integer nanosecond count."""
    as_nanos = time_seconds / MULTIPLIERS['nano']
    return int(as_nanos)
+
+
def parse_seconds_float(value):
    """Parse a duration string into (possibly fractional) seconds.

    Returns None when *value* is falsy or cannot be parsed.
    """
    return timeparse(value if value else '')
+
+
def parse_nanoseconds_int(value):
    """Parse a duration string into an integer nanosecond count.

    Returns None when *value* is falsy or cannot be parsed.
    """
    seconds = timeparse(value or '')
    return None if seconds is None else nanoseconds_from_time_seconds(seconds)
+
+
def build_string_dict(source_dict):
    """Return a copy of *source_dict* with every value stringified.

    None becomes the empty string; everything else goes through str(),
    so e.g. 0 becomes '0' rather than ''.
    """
    return {
        key: str('' if value is None else value)
        for key, value in source_dict.items()
    }
+
+
def splitdrive(path):
    """Split a Windows-style drive prefix off *path*.

    Empty paths and paths starting with '.', '\\', '/' or '~' are treated
    as driveless, bypassing ntpath's drive detection for them.
    """
    if not path:
        return ('', '')
    if path[0] in ('.', '\\', '/', '~'):
        return ('', path)
    return ntpath.splitdrive(path)
+
+
def parse_bytes(n):
    """Parse a byte-size expression via docker-py's parse_bytes.

    :param n: the size expression to parse (presumably a string such as
        '512m' or a plain number — see docker-py's parse_bytes; confirm
        accepted forms against the installed SDK version)
    :return: the parsed value, or None when docker-py rejects the input
    """
    try:
        return sdk_parse_bytes(n)
    except DockerException:
        # docker-py signals an unparseable size with DockerException;
        # callers here treat None as "no/invalid value" instead.
        return None
diff --git a/compose/version.py b/compose/version.py
new file mode 100644
index 00000000..0532e16c
--- /dev/null
+++ b/compose/version.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from distutils.version import LooseVersion
+
+
class ComposeVersion(LooseVersion):
    """ A hashable version object

    LooseVersion instances are not hashable on Python 3 (presumably
    because the base class defines comparisons without __hash__ —
    confirm against the distutils version in use), so hashing the raw
    version string lets these be used in sets and as dict keys.
    Note: versions that compare equal only hash equal when their
    vstrings are identical.
    """
    def __hash__(self):
        return hash(self.vstring)
diff --git a/compose/volume.py b/compose/volume.py
new file mode 100644
index 00000000..da8ba25c
--- /dev/null
+++ b/compose/volume.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+from docker.errors import NotFound
+from docker.utils import version_lt
+
+from .config import ConfigurationError
+from .const import LABEL_PROJECT
+from .const import LABEL_VOLUME
+
+log = logging.getLogger(__name__)
+
+
class Volume(object):
    """A single named volume belonging to a compose project."""

    def __init__(self, client, project, name, driver=None, driver_opts=None,
                 external=False, labels=None, custom_name=False):
        self.client = client
        self.project = project
        self.name = name
        self.driver = driver
        self.driver_opts = driver_opts
        # External volumes are managed outside compose and never created
        # or removed by it.
        self.external = external
        self.labels = labels
        # True when the user supplied an explicit volume name, which is
        # then used verbatim instead of being project-prefixed.
        self.custom_name = custom_name

    @property
    def full_name(self):
        """The daemon-side volume name (project-prefixed unless custom)."""
        if self.custom_name:
            return self.name
        return '{0}_{1}'.format(self.project, self.name)

    def create(self):
        """Create this volume on the docker daemon."""
        return self.client.create_volume(
            self.full_name, self.driver, self.driver_opts, labels=self._labels
        )

    def remove(self):
        """Remove this volume, unless it is externally managed."""
        if self.external:
            log.info("Volume %s is external, skipping", self.full_name)
            return
        log.info("Removing volume %s", self.full_name)
        return self.client.remove_volume(self.full_name)

    def inspect(self):
        """Return the daemon's metadata for this volume."""
        return self.client.inspect_volume(self.full_name)

    def exists(self):
        """True when the volume is present on the daemon."""
        try:
            self.inspect()
            return True
        except NotFound:
            return False

    @property
    def _labels(self):
        # Volume labels require API version 1.23 or newer.
        if version_lt(self.client._version, '1.23'):
            return None
        merged = dict(self.labels) if self.labels else {}
        merged[LABEL_PROJECT] = self.project
        merged[LABEL_VOLUME] = self.name
        return merged
+
+
class ProjectVolumes(object):
    """The collection of named volumes declared by a compose project."""

    def __init__(self, volumes):
        # Mapping of compose-file volume key -> Volume instance.
        self.volumes = volumes

    @classmethod
    def from_config(cls, name, config_data, client):
        """Build the project's volumes from parsed configuration.

        :param name: project name, used to namespace volume names
        :param config_data: parsed config object with a `volumes` mapping
        :param client: docker client handed to each Volume
        """
        config_volumes = config_data.volumes or {}
        volumes = {
            vol_name: Volume(
                client=client,
                project=name,
                name=data.get('name', vol_name),
                driver=data.get('driver'),
                driver_opts=data.get('driver_opts'),
                custom_name=data.get('name') is not None,
                labels=data.get('labels'),
                external=bool(data.get('external', False))
            )
            for vol_name, data in config_volumes.items()
        }
        return cls(volumes)

    def remove(self):
        """Remove every project volume, tolerating already-gone ones."""
        for volume in self.volumes.values():
            try:
                volume.remove()
            except NotFound:
                # Already removed (possibly by hand); just note it.
                # log.warn is deprecated; use the canonical warning().
                log.warning("Volume %s not found.", volume.full_name)

    def initialize(self):
        """Ensure every declared volume exists with a compatible driver.

        :raises ConfigurationError: when an external volume is missing,
            when an existing volume uses a different driver than
            configured, or when a configured driver does not exist.
        """
        try:
            for volume in self.volumes.values():
                volume_exists = volume.exists()
                if volume.external:
                    log.debug(
                        'Volume {0} declared as external. No new '
                        'volume will be created.'.format(volume.name)
                    )
                    if not volume_exists:
                        raise ConfigurationError(
                            'Volume {name} declared as external, but could'
                            ' not be found. Please create the volume manually'
                            ' using `{command}{name}` and try again.'.format(
                                name=volume.full_name,
                                command='docker volume create --name='
                            )
                        )
                    continue

                if not volume_exists:
                    log.info(
                        'Creating volume "{0}" with {1} driver'.format(
                            volume.full_name, volume.driver or 'default'
                        )
                    )
                    volume.create()
                else:
                    # The daemon won't reconcile a driver change on an
                    # existing volume, so refuse a mismatch explicitly.
                    driver = volume.inspect()['Driver']
                    if volume.driver is not None and driver != volume.driver:
                        raise ConfigurationError(
                            'Configuration for volume {0} specifies driver '
                            '{1}, but a volume with the same name uses a '
                            'different driver ({3}). If you wish to use the '
                            'new configuration, please remove the existing '
                            'volume "{2}" first:\n'
                            '$ docker volume rm {2}'.format(
                                volume.name, volume.driver, volume.full_name,
                                # Reuse the value fetched above instead of a
                                # second inspect() round-trip.
                                driver
                            )
                        )
        except NotFound:
            raise ConfigurationError(
                'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
            )

    def namespace_spec(self, volume_spec):
        """Rewrite a named-volume spec to use the full daemon-side name.

        Bind mounts and anonymous volumes pass through unchanged.
        """
        if not volume_spec.is_named_volume:
            return volume_spec

        volume = self.volumes[volume_spec.external]
        return volume_spec._replace(external=volume.full_name)