Diffstat (limited to 'compose')
-rw-r--r--  compose/__init__.py                      |    2
-rw-r--r--  compose/bundle.py                        |   49
-rw-r--r--  compose/cli/colors.py                    |    4
-rw-r--r--  compose/cli/command.py                   |   68
-rw-r--r--  compose/cli/docker_client.py             |    9
-rw-r--r--  compose/cli/errors.py                    |    6
-rw-r--r--  compose/cli/formatter.py                 |   21
-rw-r--r--  compose/cli/log_printer.py               |   24
-rw-r--r--  compose/cli/main.py                      |  275
-rw-r--r--  compose/cli/utils.py                     |    6
-rw-r--r--  compose/config/__init__.py               |    1
-rw-r--r--  compose/config/config.py                 |  140
-rw-r--r--  compose/config/config_schema_v2.0.json   |   21
-rw-r--r--  compose/config/config_schema_v2.1.json   |   21
-rw-r--r--  compose/config/config_schema_v2.2.json   |   21
-rw-r--r--  compose/config/config_schema_v2.3.json   |   21
-rw-r--r--  compose/config/config_schema_v2.4.json   |   24
-rw-r--r--  compose/config/config_schema_v3.7.json   |  602
-rw-r--r--  compose/config/environment.py            |   38
-rw-r--r--  compose/config/errors.py                 |    4
-rw-r--r--  compose/config/interpolation.py          |   24
-rw-r--r--  compose/config/serialize.py              |   26
-rw-r--r--  compose/config/types.py                  |   21
-rw-r--r--  compose/config/validation.py             |   25
-rw-r--r--  compose/const.py                         |    9
-rw-r--r--  compose/container.py                     |   31
-rw-r--r--  compose/network.py                       |   94
-rw-r--r--  compose/parallel.py                      |   22
-rw-r--r--  compose/progress_stream.py               |   13
-rw-r--r--  compose/project.py                       |  242
-rw-r--r--  compose/service.py                       |  596
-rw-r--r--  compose/utils.py                         |   36
-rw-r--r--  compose/volume.py                        |   57
33 files changed, 2046 insertions, 507 deletions
diff --git a/compose/__init__.py b/compose/__init__.py
index 693a1ab1..d35e818c 100644
--- a/compose/__init__.py
+++ b/compose/__init__.py
@@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals
-__version__ = '1.21.0'
+__version__ = '1.25.0'
diff --git a/compose/bundle.py b/compose/bundle.py
index 937a3708..77cb37aa 100644
--- a/compose/bundle.py
+++ b/compose/bundle.py
@@ -95,19 +95,10 @@ def get_image_digest(service, allow_push=False):
if separator == '@':
return service.options['image']
- try:
- image = service.image()
- except NoSuchImageError:
- action = 'build' if 'build' in service.options else 'pull'
- raise UserError(
- "Image not found for service '{service}'. "
- "You might need to run `docker-compose {action} {service}`."
- .format(service=service.name, action=action))
+ digest = get_digest(service)
- if image['RepoDigests']:
- # TODO: pick a digest based on the image tag if there are multiple
- # digests
- return image['RepoDigests'][0]
+ if digest:
+ return digest
if 'build' not in service.options:
raise NeedsPull(service.image_name, service.name)
@@ -118,6 +109,32 @@ def get_image_digest(service, allow_push=False):
return push_image(service)
+def get_digest(service):
+ digest = None
+ try:
+ image = service.image()
+ # TODO: pick a digest based on the image tag if there are multiple
+ # digests
+ if image['RepoDigests']:
+ digest = image['RepoDigests'][0]
+ except NoSuchImageError:
+ try:
+ # Fetch the image digest from the registry
+ distribution = service.get_image_registry_data()
+
+ if distribution['Descriptor']['digest']:
+ digest = '{image_name}@{digest}'.format(
+ image_name=service.image_name,
+ digest=distribution['Descriptor']['digest']
+ )
+ except NoSuchImageError:
+ raise UserError(
+ "Digest not found for service '{service}'. "
+ "Repository does not exist or may require 'docker login'"
+ .format(service=service.name))
+ return digest
+
+
def push_image(service):
try:
digest = service.push()
@@ -147,10 +164,10 @@ def push_image(service):
def to_bundle(config, image_digests):
if config.networks:
- log.warn("Unsupported top level key 'networks' - ignoring")
+ log.warning("Unsupported top level key 'networks' - ignoring")
if config.volumes:
- log.warn("Unsupported top level key 'volumes' - ignoring")
+ log.warning("Unsupported top level key 'volumes' - ignoring")
config = denormalize_config(config)
@@ -175,7 +192,7 @@ def convert_service_to_bundle(name, service_dict, image_digest):
continue
if key not in SUPPORTED_KEYS:
- log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
+ log.warning("Unsupported key '{}' in services.{} - ignoring".format(key, name))
continue
if key == 'environment':
@@ -222,7 +239,7 @@ def make_service_networks(name, service_dict):
for network_name, network_def in get_network_defs_for_service(service_dict).items():
for key in network_def.keys():
- log.warn(
+ log.warning(
"Unsupported key '{}' in services.{}.networks.{} - ignoring"
.format(key, name, network_name))
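
The refactored get_digest() above prefers a digest already pinned on the local
image and only asks the registry when the image is missing. A minimal runnable
sketch of that resolution order, assuming a service object with the image(),
get_image_registry_data() and image_name members used in this diff
(NoSuchImageError is a stand-in here; the real code also turns a second
NoSuchImageError into a UserError):

    class NoSuchImageError(Exception):
        """Stand-in for the exception bundle.py catches."""

    def resolve_digest(service):
        try:
            image = service.image()
            if image['RepoDigests']:
                # Digest already pinned on the local image
                return image['RepoDigests'][0]
            return None
        except NoSuchImageError:
            # No local image: ask the registry for the descriptor digest
            data = service.get_image_registry_data()
            return '{image}@{digest}'.format(
                image=service.image_name,
                digest=data['Descriptor']['digest'])
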
diff --git a/compose/cli/colors.py b/compose/cli/colors.py
index cb30e361..ea45198e 100644
--- a/compose/cli/colors.py
+++ b/compose/cli/colors.py
@@ -41,9 +41,9 @@ for (name, code) in get_pairs():
def rainbow():
- cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
+ cs = ['cyan', 'yellow', 'green', 'magenta', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
- 'intense_magenta', 'intense_red', 'intense_blue']
+ 'intense_magenta', 'intense_blue']
for c in cs:
yield globals()[c]
diff --git a/compose/cli/command.py b/compose/cli/command.py
index 8a32a93a..c3a10a04 100644
--- a/compose/cli/command.py
+++ b/compose/cli/command.py
@@ -13,6 +13,9 @@ from .. import config
from .. import parallel
from ..config.environment import Environment
from ..const import API_VERSIONS
+from ..const import LABEL_CONFIG_FILES
+from ..const import LABEL_ENVIRONMENT_FILE
+from ..const import LABEL_WORKING_DIR
from ..project import Project
from .docker_client import docker_client
from .docker_client import get_tls_version
@@ -21,9 +24,27 @@ from .utils import get_version_info
log = logging.getLogger(__name__)
-
-def project_from_options(project_dir, options):
- environment = Environment.from_env_file(project_dir)
+SILENT_COMMANDS = {
+ 'events',
+ 'exec',
+ 'kill',
+ 'logs',
+ 'pause',
+ 'ps',
+ 'restart',
+ 'rm',
+ 'start',
+ 'stop',
+ 'top',
+ 'unpause',
+}
+
+
+def project_from_options(project_dir, options, additional_options={}):
+ override_dir = options.get('--project-directory')
+ environment_file = options.get('--env-file')
+ environment = Environment.from_env_file(override_dir or project_dir, environment_file)
+ environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS
set_parallel_limit(environment)
host = options.get('--host')
@@ -37,8 +58,10 @@ def project_from_options(project_dir, options):
host=host,
tls_config=tls_config_from_options(options, environment),
environment=environment,
- override_dir=options.get('--project-directory'),
+ override_dir=override_dir,
compatibility=options.get('--compatibility'),
+ interpolate=(not additional_options.get('--no-interpolate')),
+ environment_file=environment_file
)
@@ -58,14 +81,17 @@ def set_parallel_limit(environment):
parallel.GlobalLimit.set_global_limit(parallel_limit)
-def get_config_from_options(base_dir, options):
- environment = Environment.from_env_file(base_dir)
+def get_config_from_options(base_dir, options, additional_options={}):
+ override_dir = options.get('--project-directory')
+ environment_file = options.get('--env-file')
+ environment = Environment.from_env_file(override_dir or base_dir, environment_file)
config_path = get_config_path_from_options(
base_dir, options, environment
)
return config.load(
- config.find(base_dir, config_path, environment),
- options.get('--compatibility')
+ config.find(base_dir, config_path, environment, override_dir),
+ options.get('--compatibility'),
+ not additional_options.get('--no-interpolate')
)
@@ -103,14 +129,14 @@ def get_client(environment, verbose=False, version=None, tls_config=None, host=N
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
host=None, tls_config=None, environment=None, override_dir=None,
- compatibility=False):
+ compatibility=False, interpolate=True, environment_file=None):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
- config_data = config.load(config_details, compatibility)
+ config_data = config.load(config_details, compatibility, interpolate)
api_version = environment.get(
'COMPOSE_API_VERSION',
@@ -123,10 +149,30 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,
with errors.handle_connection_errors(client):
return Project.from_config(
- project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM')
+ project_name,
+ config_data,
+ client,
+ environment.get('DOCKER_DEFAULT_PLATFORM'),
+ execution_context_labels(config_details, environment_file),
)
+def execution_context_labels(config_details, environment_file):
+ extra_labels = [
+ '{0}={1}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir)),
+ '{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)),
+ ]
+ if environment_file is not None:
+ extra_labels.append('{0}={1}'.format(LABEL_ENVIRONMENT_FILE,
+ os.path.normpath(environment_file)))
+ return extra_labels
+
+
+def config_files_label(config_details):
+ return ",".join(
+ map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))
+
+
def get_project_name(working_dir, project_name=None, environment=None):
def normalize_name(name):
return re.sub(r'[^-_a-z0-9]', '', name.lower())
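
The labels built by execution_context_labels() record where a project was
started from, so later invocations can find the same context. A sketch of the
strings produced, with assumed label-key values standing in for the new
compose.const constants:

    import os

    # Assumed values; the real keys come from compose.const
    LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir'
    LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files'
    LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file'

    def execution_context_labels(working_dir, config_files, environment_file=None):
        labels = [
            '{0}={1}'.format(LABEL_WORKING_DIR, os.path.abspath(working_dir)),
            '{0}={1}'.format(LABEL_CONFIG_FILES,
                             ','.join(os.path.normpath(f) for f in config_files)),
        ]
        if environment_file is not None:
            labels.append('{0}={1}'.format(LABEL_ENVIRONMENT_FILE,
                                           os.path.normpath(environment_file)))
        return labels

    # execution_context_labels('.', ['docker-compose.yml'], '.env') ->
    # ['com.docker.compose.project.working_dir=/abs/path',
    #  'com.docker.compose.project.config_files=docker-compose.yml',
    #  'com.docker.compose.project.environment_file=.env']
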
diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py
index 939e95bf..a57a69b5 100644
--- a/compose/cli/docker_client.py
+++ b/compose/cli/docker_client.py
@@ -31,7 +31,7 @@ def get_tls_version(environment):
tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
if not hasattr(ssl, tls_attr_name):
- log.warn(
+ log.warning(
'The "{}" protocol is unavailable. You may need to update your '
'version of Python or OpenSSL. Falling back to TLSv1 (default).'
.format(compose_tls_version)
@@ -117,6 +117,13 @@ def docker_client(environment, version=None, tls_config=None, host=None,
kwargs['user_agent'] = generate_user_agent()
+ # Workaround for
+ # https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#ld-library-path-libpath-considerations
+ if 'LD_LIBRARY_PATH_ORIG' in environment:
+ kwargs['credstore_env'] = {
+ 'LD_LIBRARY_PATH': environment.get('LD_LIBRARY_PATH_ORIG'),
+ }
+
client = APIClient(**kwargs)
client._original_base_url = kwargs.get('base_url')
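
Background on the workaround above: PyInstaller rewrites LD_LIBRARY_PATH for
the frozen binary and keeps the original value in LD_LIBRARY_PATH_ORIG;
forwarding it through docker-py's credstore_env lets credential helpers run
with the user's original library path. A sketch, assuming a plain dict for the
environment:

    def credstore_kwargs(environment):
        kwargs = {}
        if 'LD_LIBRARY_PATH_ORIG' in environment:
            # Restore the pre-PyInstaller value for the credential-helper
            # subprocess spawned by docker-py
            kwargs['credstore_env'] = {
                'LD_LIBRARY_PATH': environment['LD_LIBRARY_PATH_ORIG'],
            }
        return kwargs

    # APIClient(base_url='unix:///var/run/docker.sock',
    #           **credstore_kwargs({'LD_LIBRARY_PATH_ORIG': '/usr/lib'}))
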
diff --git a/compose/cli/errors.py b/compose/cli/errors.py
index 82768970..189b67fa 100644
--- a/compose/cli/errors.py
+++ b/compose/cli/errors.py
@@ -54,7 +54,7 @@ def handle_connection_errors(client):
except APIError as e:
log_api_error(e, client.api_version)
raise ConnectionError()
- except (ReadTimeout, socket.timeout) as e:
+ except (ReadTimeout, socket.timeout):
log_timeout_error(client.timeout)
raise ConnectionError()
except Exception as e:
@@ -67,7 +67,9 @@ def handle_connection_errors(client):
def log_windows_pipe_error(exc):
- if exc.winerror == 232: # https://github.com/docker/compose/issues/5005
+ if exc.winerror == 2:
+ log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.")
+ elif exc.winerror == 232: # https://github.com/docker/compose/issues/5005
log.error(
"The current Compose file version is not compatible with your engine version. "
"Please upgrade your Compose file to a more recent version, or set "
diff --git a/compose/cli/formatter.py b/compose/cli/formatter.py
index 6c0a3695..c1f43ed7 100644
--- a/compose/cli/formatter.py
+++ b/compose/cli/formatter.py
@@ -2,25 +2,32 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
-import os
+import shutil
import six
import texttable
from compose.cli import colors
+if hasattr(shutil, "get_terminal_size"):
+ from shutil import get_terminal_size
+else:
+ from backports.shutil_get_terminal_size import get_terminal_size
+
def get_tty_width():
- tty_size = os.popen('stty size 2> /dev/null', 'r').read().split()
- if len(tty_size) != 2:
+ try:
+ width, _ = get_terminal_size()
+ return int(width)
+ except OSError:
return 0
- _, width = tty_size
- return int(width)
-class Formatter(object):
+class Formatter:
"""Format tabular data for printing."""
- def table(self, headers, rows):
+
+ @staticmethod
+ def table(headers, rows):
table = texttable.Texttable(max_width=get_tty_width())
table.set_cols_dtype(['t' for h in headers])
table.add_rows([headers] + rows)
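
Replacing os.popen('stty size') with shutil.get_terminal_size() avoids
shelling out and also works on Windows. The same fallback pattern in isolation
(the Python 2 branch assumes the backports.shutil_get_terminal_size package is
installed):

    import shutil

    if hasattr(shutil, 'get_terminal_size'):     # Python 3.3+
        from shutil import get_terminal_size
    else:                                        # Python 2 backport
        from backports.shutil_get_terminal_size import get_terminal_size

    def tty_width(default=0):
        try:
            columns, _lines = get_terminal_size()
            return int(columns)
        except OSError:
            return default   # e.g. no controlling terminal
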
diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py
index 60bba8da..a4b70a67 100644
--- a/compose/cli/log_printer.py
+++ b/compose/cli/log_printer.py
@@ -134,7 +134,10 @@ def build_thread(container, presenter, queue, log_args):
def build_thread_map(initial_containers, presenters, thread_args):
return {
container.id: build_thread(container, next(presenters), *thread_args)
- for container in initial_containers
+ # Container order is unspecified, so they are sorted by name in order to make
+ # container:presenter (log color) assignment deterministic when given a list of containers
+ # with the same names.
+ for container in sorted(initial_containers, key=lambda c: c.name)
}
@@ -210,10 +213,15 @@ def start_producer_thread(thread_args):
def watch_events(thread_map, event_stream, presenters, thread_args):
+ crashed_containers = set()
for event in event_stream:
if event['action'] == 'stop':
thread_map.pop(event['id'], None)
+ if event['action'] == 'die':
+ thread_map.pop(event['id'], None)
+ crashed_containers.add(event['id'])
+
if event['action'] != 'start':
continue
@@ -223,10 +231,22 @@ def watch_events(thread_map, event_stream, presenters, thread_args):
# Container was stopped and started, we need a new thread
thread_map.pop(event['id'], None)
+ # Container crashed so we should reattach to it
+ if event['id'] in crashed_containers:
+ container = event['container']
+ if not container.is_restarting:
+ try:
+ container.attach_log_stream()
+ except APIError:
+ # Just ignore errors when reattaching to already crashed containers
+ pass
+ crashed_containers.remove(event['id'])
+
thread_map[event['id']] = build_thread(
event['container'],
next(presenters),
- *thread_args)
+ *thread_args
+ )
def consume_queue(queue, cascade_stop):
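
Sorting the initial containers makes the container-to-presenter pairing
reproducible, because presenters is a generator that hands out colors in
iteration order. A small self-contained illustration:

    import itertools

    def assign_colors(container_names, palette=('cyan', 'yellow', 'green')):
        presenters = itertools.cycle(palette)   # like the presenter generator
        return {name: next(presenters) for name in sorted(container_names)}

    # Same assignment regardless of the order the API returned the containers:
    # assign_colors(['db_1', 'web_1']) == assign_colors(['web_1', 'db_1'])
    # -> {'db_1': 'cyan', 'web_1': 'yellow'}
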
diff --git a/compose/cli/main.py b/compose/cli/main.py
index a9720583..fde4fd03 100644
--- a/compose/cli/main.py
+++ b/compose/cli/main.py
@@ -6,6 +6,7 @@ import contextlib
import functools
import json
import logging
+import os
import pipes
import re
import subprocess
@@ -102,9 +103,9 @@ def dispatch():
options, handler, command_options = dispatcher.parse(sys.argv[1:])
setup_console_handler(console_handler,
options.get('--verbose'),
- options.get('--no-ansi'),
+ set_no_color_if_clicolor(options.get('--no-ansi')),
options.get("--log-level"))
- setup_parallel_logger(options.get('--no-ansi'))
+ setup_parallel_logger(set_no_color_if_clicolor(options.get('--no-ansi')))
if options.get('--no-ansi'):
command_options['--no-color'] = True
return functools.partial(perform_command, options, handler, command_options)
@@ -206,8 +207,9 @@ class TopLevelCommand(object):
name specified in the client certificate
--project-directory PATH Specify an alternate working directory
(default: the path of the Compose file)
- --compatibility If set, Compose will attempt to convert deploy
- keys in v3 files to their non-Swarm equivalent
+ --compatibility If set, Compose will attempt to convert keys
+ in v3 files to their non-Swarm equivalent
+ --env-file PATH Specify an alternate environment file
Commands:
build Build or rebuild services
@@ -238,11 +240,19 @@ class TopLevelCommand(object):
version Show the Docker-Compose version information
"""
- def __init__(self, project, project_dir='.', options=None):
+ def __init__(self, project, options=None):
self.project = project
- self.project_dir = '.'
self.toplevel_options = options or {}
+ @property
+ def project_dir(self):
+ return self.toplevel_options.get('--project-directory') or '.'
+
+ @property
+ def toplevel_environment(self):
+ environment_file = self.toplevel_options.get('--env-file')
+ return Environment.from_env_file(self.project_dir, environment_file)
+
def build(self, options):
"""
Build or rebuild services.
@@ -254,12 +264,18 @@ class TopLevelCommand(object):
Usage: build [options] [--build-arg key=val...] [SERVICE...]
Options:
+ --build-arg key=val Set build-time variables for services.
--compress Compress the build context using gzip.
--force-rm Always remove intermediate containers.
+ -m, --memory MEM Set memory limit for the build container.
--no-cache Do not use cache when building the image.
+ --no-rm Do not remove intermediate containers after a successful build.
+ --parallel Build images in parallel.
+ --progress string Set type of progress output (auto, plain, tty).
+ EXPERIMENTAL flag for native builder.
+ To enable, run with COMPOSE_DOCKER_CLI_BUILD=1)
--pull Always attempt to pull a newer version of the image.
- -m, --memory MEM Sets memory limit for the build container.
- --build-arg key=val Set build-time variables for services.
+ -q, --quiet Don't print anything to STDOUT
"""
service_names = options['SERVICE']
build_args = options.get('--build-arg', None)
@@ -269,8 +285,9 @@ class TopLevelCommand(object):
'--build-arg is only supported when services are specified for API version < 1.25.'
' Please use a Compose file version > 2.2 or specify which services to build.'
)
- environment = Environment.from_env_file(self.project_dir)
- build_args = resolve_build_args(build_args, environment)
+ build_args = resolve_build_args(build_args, self.toplevel_environment)
+
+ native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
self.project.build(
service_names=options['SERVICE'],
@@ -278,8 +295,13 @@ class TopLevelCommand(object):
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)),
memory=options.get('--memory'),
+ rm=not bool(options.get('--no-rm', False)),
build_args=build_args,
gzip=options.get('--compress', False),
+ parallel_build=options.get('--parallel', False),
+ silent=options.get('--quiet', False),
+ cli=native_builder,
+ progress=options.get('--progress'),
)
def bundle(self, options):
@@ -301,7 +323,7 @@ class TopLevelCommand(object):
-o, --output PATH Path to write the bundle file to.
Defaults to "<project name>.dab".
"""
- compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
+ compose_config = get_config_from_options('.', self.toplevel_options)
output = options["--output"]
if not output:
@@ -322,18 +344,22 @@ class TopLevelCommand(object):
Options:
--resolve-image-digests Pin image tags to digests.
+ --no-interpolate Don't interpolate environment variables
-q, --quiet Only validate the configuration, don't print
anything.
--services Print the service names, one per line.
--volumes Print the volume names, one per line.
-
+ --hash="*" Print the service config hash, one per line.
+ Set "service1,service2" for a list of specified services
+ or use the wildcard symbol to display all services
"""
- compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
+ additional_options = {'--no-interpolate': options.get('--no-interpolate')}
+ compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
image_digests = None
if options['--resolve-image-digests']:
- self.project = project_from_options('.', self.toplevel_options)
+ self.project = project_from_options('.', self.toplevel_options, additional_options)
with errors.handle_connection_errors(self.project.client):
image_digests = image_digests_for_project(self.project)
@@ -348,7 +374,16 @@ class TopLevelCommand(object):
print('\n'.join(volume for volume in compose_config.volumes))
return
- print(serialize_config(compose_config, image_digests))
+ if options['--hash'] is not None:
+ h = options['--hash']
+ self.project = project_from_options('.', self.toplevel_options, additional_options)
+ services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
+ with errors.handle_connection_errors(self.project.client):
+ for service in self.project.get_services(services):
+ print('{} {}'.format(service.name, service.config_hash))
+ return
+
+ print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
def create(self, options):
"""
@@ -367,7 +402,7 @@ class TopLevelCommand(object):
"""
service_names = options['SERVICE']
- log.warn(
+ log.warning(
'The create command is deprecated. '
'Use the up command with the --no-start flag instead.'
)
@@ -406,8 +441,7 @@ class TopLevelCommand(object):
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
- environment = Environment.from_env_file(self.project_dir)
- ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+ ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
if ignore_orphans and options['--remove-orphans']:
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
@@ -464,8 +498,7 @@ class TopLevelCommand(object):
not supported in API < 1.25)
-w, --workdir DIR Path to workdir directory for this command.
"""
- environment = Environment.from_env_file(self.project_dir)
- use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+ use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
detach = options.get('--detach')
@@ -488,7 +521,7 @@ class TopLevelCommand(object):
if IS_WINDOWS_PLATFORM or use_cli and not detach:
sys.exit(call_docker(
build_exec_command(options, container.id, command),
- self.toplevel_options)
+ self.toplevel_options, self.toplevel_environment)
)
create_exec_options = {
@@ -552,31 +585,43 @@ class TopLevelCommand(object):
if options['--quiet']:
for image in set(c.image for c in containers):
print(image.split(':')[1])
- else:
- headers = [
- 'Container',
- 'Repository',
- 'Tag',
- 'Image Id',
- 'Size'
- ]
- rows = []
- for container in containers:
- image_config = container.image_config
- repo_tags = (
- image_config['RepoTags'][0].rsplit(':', 1) if image_config['RepoTags']
- else ('<none>', '<none>')
- )
- image_id = image_config['Id'].split(':')[1][:12]
- size = human_readable_file_size(image_config['Size'])
- rows.append([
- container.name,
- repo_tags[0],
- repo_tags[1],
- image_id,
- size
- ])
- print(Formatter().table(headers, rows))
+ return
+
+ def add_default_tag(img_name):
+ if ':' not in img_name.split('/')[-1]:
+ return '{}:latest'.format(img_name)
+ return img_name
+
+ headers = [
+ 'Container',
+ 'Repository',
+ 'Tag',
+ 'Image Id',
+ 'Size'
+ ]
+ rows = []
+ for container in containers:
+ image_config = container.image_config
+ service = self.project.get_service(container.service)
+ index = 0
+ img_name = add_default_tag(service.image_name)
+ if img_name in image_config['RepoTags']:
+ index = image_config['RepoTags'].index(img_name)
+ repo_tags = (
+ image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags']
+ else ('<none>', '<none>')
+ )
+
+ image_id = image_config['Id'].split(':')[1][:12]
+ size = human_readable_file_size(image_config['Size'])
+ rows.append([
+ container.name,
+ repo_tags[0],
+ repo_tags[1],
+ image_id,
+ size
+ ])
+ print(Formatter.table(headers, rows))
def kill(self, options):
"""
@@ -622,7 +667,7 @@ class TopLevelCommand(object):
log_printer_from_project(
self.project,
containers,
- options['--no-color'],
+ set_no_color_if_clicolor(options['--no-color']),
log_args,
event_stream=self.project.events(service_names=options['SERVICE'])).run()
@@ -666,6 +711,7 @@ class TopLevelCommand(object):
-q, --quiet Only display IDs
--services Display services
--filter KEY=VAL Filter services by a property
+ -a, --all Show all stopped containers (including those created by the run command)
"""
if options['--quiet'] and options['--services']:
raise UserError('--quiet and --services cannot be combined')
@@ -678,10 +724,15 @@ class TopLevelCommand(object):
print('\n'.join(service.name for service in services))
return
- containers = sorted(
- self.project.containers(service_names=options['SERVICE'], stopped=True) +
- self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
- key=attrgetter('name'))
+ if options['--all']:
+ containers = sorted(self.project.containers(service_names=options['SERVICE'],
+ one_off=OneOffFilter.include, stopped=True),
+ key=attrgetter('name'))
+ else:
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
if options['--quiet']:
for container in containers:
@@ -704,7 +755,7 @@ class TopLevelCommand(object):
container.human_readable_state,
container.human_readable_ports,
])
- print(Formatter().table(headers, rows))
+ print(Formatter.table(headers, rows))
def pull(self, options):
"""
@@ -720,7 +771,7 @@ class TopLevelCommand(object):
--include-deps Also pull services declared as dependencies
"""
if options.get('--parallel'):
- log.warn('--parallel option is deprecated and will be removed in future versions.')
+ log.warning('--parallel option is deprecated and will be removed in future versions.')
self.project.pull(
service_names=options['SERVICE'],
ignore_pull_failures=options.get('--ignore-pull-failures'),
@@ -761,7 +812,7 @@ class TopLevelCommand(object):
-a, --all Deprecated - no effect.
"""
if options.get('--all'):
- log.warn(
+ log.warning(
'--all flag is obsolete. This is now the default behavior '
'of `docker-compose rm`'
)
@@ -839,10 +890,12 @@ class TopLevelCommand(object):
else:
command = service.options.get('command')
- container_options = build_container_options(options, detach, command)
+ options['stdin_open'] = service.options.get('stdin_open', True)
+
+ container_options = build_one_off_container_options(options, detach, command)
run_one_off_container(
container_options, self.project, service, options,
- self.toplevel_options, self.project_dir
+ self.toplevel_options, self.toplevel_environment
)
def scale(self, options):
@@ -871,7 +924,7 @@ class TopLevelCommand(object):
'Use the up command with the --scale flag instead.'
)
else:
- log.warn(
+ log.warning(
'The scale command is deprecated. '
'Use the up command with the --scale flag instead.'
)
@@ -942,7 +995,7 @@ class TopLevelCommand(object):
rows.append(process)
print(container.name)
- print(Formatter().table(headers, rows))
+ print(Formatter.table(headers, rows))
def unpause(self, options):
"""
@@ -1017,8 +1070,7 @@ class TopLevelCommand(object):
if detached and (cascade_stop or exit_value_from):
raise UserError("--abort-on-container-exit and -d cannot be combined.")
- environment = Environment.from_env_file(self.project_dir)
- ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+ ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
if ignore_orphans and remove_orphans:
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
@@ -1027,6 +1079,8 @@ class TopLevelCommand(object):
for excluded in [x for x in opts if options.get(x) and no_start]:
raise UserError('--no-start and {} cannot be combined.'.format(excluded))
+ native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
+
with up_shutdown_context(self.project, service_names, timeout, detached):
warn_for_swarm_mode(self.project.client)
@@ -1046,6 +1100,7 @@ class TopLevelCommand(object):
reset_container_image=rebuild,
renew_anonymous_volumes=options.get('--renew-anon-volumes'),
silent=options.get('--quiet-pull'),
+ cli=native_builder,
)
try:
@@ -1070,7 +1125,7 @@ class TopLevelCommand(object):
log_printer = log_printer_from_project(
self.project,
attached_containers,
- options['--no-color'],
+ set_no_color_if_clicolor(options['--no-color']),
{'follow': True},
cascade_stop,
event_stream=self.project.events(service_names=service_names))
@@ -1085,12 +1140,15 @@ class TopLevelCommand(object):
)
self.project.stop(service_names=service_names, timeout=timeout)
+ if exit_value_from:
+ exit_code = compute_service_exit_code(exit_value_from, attached_containers)
+
sys.exit(exit_code)
@classmethod
def version(cls, options):
"""
- Show version informations
+ Show version information
Usage: version [--short]
@@ -1103,33 +1161,33 @@ class TopLevelCommand(object):
print(get_version_info('full'))
+def compute_service_exit_code(exit_value_from, attached_containers):
+ candidates = list(filter(
+ lambda c: c.service == exit_value_from,
+ attached_containers))
+ if not candidates:
+ log.error(
+ 'No containers matching the spec "{0}" '
+ 'were run.'.format(exit_value_from)
+ )
+ return 2
+ if len(candidates) > 1:
+ exit_values = filter(
+ lambda e: e != 0,
+ [c.inspect()['State']['ExitCode'] for c in candidates]
+ )
+
+ return exit_values[0]
+ return candidates[0].inspect()['State']['ExitCode']
+
+
def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
exit_code = 0
- if exit_value_from:
- candidates = list(filter(
- lambda c: c.service == exit_value_from,
- attached_containers))
- if not candidates:
- log.error(
- 'No containers matching the spec "{0}" '
- 'were run.'.format(exit_value_from)
- )
- exit_code = 2
- elif len(candidates) > 1:
- exit_values = filter(
- lambda e: e != 0,
- [c.inspect()['State']['ExitCode'] for c in candidates]
- )
-
- exit_code = exit_values[0]
- else:
- exit_code = candidates[0].inspect()['State']['ExitCode']
- else:
- for e in all_containers:
- if (not e.is_running and cascade_starter == e.name):
- if not e.exit_code == 0:
- exit_code = e.exit_code
- break
+ for e in all_containers:
+ if (not e.is_running and cascade_starter == e.name):
+ if not e.exit_code == 0:
+ exit_code = e.exit_code
+ break
return exit_code
@@ -1200,7 +1258,7 @@ def exitval_from_opts(options, project):
exit_value_from = options.get('--exit-code-from')
if exit_value_from:
if not options.get('--abort-on-container-exit'):
- log.warn('using --exit-code-from implies --abort-on-container-exit')
+ log.warning('using --exit-code-from implies --abort-on-container-exit')
options['--abort-on-container-exit'] = True
if exit_value_from not in [s.name for s in project.get_services()]:
log.error('No service named "%s" was found in your compose file.',
@@ -1231,11 +1289,11 @@ def build_action_from_opts(options):
return BuildAction.none
-def build_container_options(options, detach, command):
+def build_one_off_container_options(options, detach, command):
container_options = {
'command': command,
'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
- 'stdin_open': not detach,
+ 'stdin_open': options.get('stdin_open'),
'detach': detach,
}
@@ -1252,8 +1310,8 @@ def build_container_options(options, detach, command):
[""] if options['--entrypoint'] == '' else options['--entrypoint']
)
- if options['--rm']:
- container_options['restart'] = None
+ # Ensure that run command remains one-off (issue #6302)
+ container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
@@ -1278,7 +1336,7 @@ def build_container_options(options, detach, command):
def run_one_off_container(container_options, project, service, options, toplevel_options,
- project_dir='.'):
+ toplevel_environment):
if not options['--no-deps']:
deps = service.get_dependency_names()
if deps:
@@ -1307,8 +1365,7 @@ def run_one_off_container(container_options, project, service, options, toplevel
if options['--rm']:
project.client.remove_container(container.id, force=True, v=True)
- environment = Environment.from_env_file(project_dir)
- use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+ use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
signals.set_signal_handler_to_shutdown()
signals.set_signal_handler_to_hang_up()
@@ -1317,8 +1374,8 @@ def run_one_off_container(container_options, project, service, options, toplevel
if IS_WINDOWS_PLATFORM or use_cli:
service.connect_container_to_networks(container, use_network_aliases)
exit_code = call_docker(
- ["start", "--attach", "--interactive", container.id],
- toplevel_options
+ get_docker_start_call(container_options, container.id),
+ toplevel_options, toplevel_environment
)
else:
operation = RunOperation(
@@ -1344,6 +1401,16 @@ def run_one_off_container(container_options, project, service, options, toplevel
sys.exit(exit_code)
+def get_docker_start_call(container_options, container_id):
+ docker_call = ["start"]
+ if not container_options.get('detach'):
+ docker_call.append("--attach")
+ if container_options.get('stdin_open'):
+ docker_call.append("--interactive")
+ docker_call.append(container_id)
+ return docker_call
+
+
def log_printer_from_project(
project,
containers,
@@ -1398,7 +1465,7 @@ def exit_if(condition, message, exit_code):
raise SystemExit(exit_code)
-def call_docker(args, dockeropts):
+def call_docker(args, dockeropts, environment):
executable_path = find_executable('docker')
if not executable_path:
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
@@ -1421,12 +1488,14 @@ def call_docker(args, dockeropts):
if verify:
tls_options.append('--tlsverify')
if host:
- tls_options.extend(['--host', host.lstrip('=')])
+ tls_options.extend(
+ ['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))]
+ )
args = [executable_path] + tls_options + args
log.debug(" ".join(map(pipes.quote, args)))
- return subprocess.call(args)
+ return subprocess.call(args, env=environment)
def parse_scale_args(options):
@@ -1527,10 +1596,14 @@ def warn_for_swarm_mode(client):
# UCP does multi-node scheduling with traditional Compose files.
return
- log.warn(
+ log.warning(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)
+
+
+def set_no_color_if_clicolor(no_color_flag):
+ return no_color_flag or os.environ.get('CLICOLOR') == "0"
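
set_no_color_if_clicolor() wires the informal CLICOLOR convention into the two
logging setups at the top of dispatch(): either --no-ansi or CLICOLOR=0 in the
environment disables color. In isolation:

    import os

    def color_disabled(no_color_flag):
        # Explicit flag wins; otherwise honor CLICOLOR=0 from the environment
        return no_color_flag or os.environ.get('CLICOLOR') == '0'

    # CLICOLOR=0 docker-compose up   -> monochrome output, as with --no-ansi
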
diff --git a/compose/cli/utils.py b/compose/cli/utils.py
index 4cc055cc..931487a6 100644
--- a/compose/cli/utils.py
+++ b/compose/cli/utils.py
@@ -133,12 +133,12 @@ def generate_user_agent():
def human_readable_file_size(size):
suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
- order = int(math.log(size, 2) / 10) if size else 0
+ order = int(math.log(size, 1000)) if size else 0
if order >= len(suffixes):
order = len(suffixes) - 1
- return '{0:.3g} {1}'.format(
- size / float(1 << (order * 10)),
+ return '{0:.4g} {1}'.format(
+ size / pow(10, order * 3),
suffixes[order]
)
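
The rewritten human_readable_file_size() moves from binary (1024-based) to
decimal (SI) units. A worked comparison under the new math:

    import math

    def human_readable_file_size(size):
        suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
        order = int(math.log(size, 1000)) if size else 0
        if order >= len(suffixes):
            order = len(suffixes) - 1
        return '{0:.4g} {1}'.format(size / pow(10, order * 3), suffixes[order])

    # human_readable_file_size(1048576) -> '1.049 MB'
    #   (the old base-1024 version reported the same 1 MiB blob as '1 MB')
    # human_readable_file_size(1000)    -> '1 kB'
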
diff --git a/compose/config/__init__.py b/compose/config/__init__.py
index e1032f3d..2b40666f 100644
--- a/compose/config/__init__.py
+++ b/compose/config/__init__.py
@@ -6,6 +6,7 @@ from . import environment
from .config import ConfigurationError
from .config import DOCKER_CONFIG_KEYS
from .config import find
+from .config import is_url
from .config import load
from .config import merge_environment
from .config import merge_labels
diff --git a/compose/config/config.py b/compose/config/config.py
index 9f8a50c6..f64dc04a 100644
--- a/compose/config/config.py
+++ b/compose/config/config.py
@@ -8,6 +8,7 @@ import os
import string
import sys
from collections import namedtuple
+from operator import attrgetter
import six
import yaml
@@ -50,6 +51,7 @@ from .validation import match_named_volumes
from .validation import validate_against_config_schema
from .validation import validate_config_section
from .validation import validate_cpu
+from .validation import validate_credential_spec
from .validation import validate_depends_on
from .validation import validate_extends_file_path
from .validation import validate_healthcheck
@@ -91,6 +93,7 @@ DOCKER_CONFIG_KEYS = [
'healthcheck',
'image',
'ipc',
+ 'isolation',
'labels',
'links',
'mac_address',
@@ -195,9 +198,9 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
version = self.config['version']
if isinstance(version, dict):
- log.warn('Unexpected type for "version" key in "{}". Assuming '
- '"version" is the name of a service, and defaulting to '
- 'Compose file version 1.'.format(self.filename))
+ log.warning('Unexpected type for "version" key in "{}". Assuming '
+ '"version" is the name of a service, and defaulting to '
+ 'Compose file version 1.'.format(self.filename))
return V1
if not isinstance(version, six.string_types):
@@ -315,8 +318,8 @@ def get_default_config_files(base_dir):
winner = candidates[0]
if len(candidates) > 1:
- log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
- log.warn("Using %s\n", winner)
+ log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
+ log.warning("Using %s\n", winner)
return [os.path.join(path, winner)] + get_default_override_file(path)
@@ -359,7 +362,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
def check_swarm_only_key(service_dicts, key):
services = [s for s in service_dicts if s.get(key)]
if services:
- log.warn(
+ log.warning(
warning_template.format(
services=", ".join(sorted(s['name'] for s in services)),
key=key
@@ -367,11 +370,10 @@ def check_swarm_only_config(service_dicts, compatibility=False):
)
if not compatibility:
check_swarm_only_key(service_dicts, 'deploy')
- check_swarm_only_key(service_dicts, 'credential_spec')
check_swarm_only_key(service_dicts, 'configs')
-def load(config_details, compatibility=False):
+def load(config_details, compatibility=False, interpolate=True):
"""Load the configuration from a working directory and a list of
configuration files. Files are loaded in order, and merged on top
of each other to create the final configuration.
@@ -381,7 +383,7 @@ def load(config_details, compatibility=False):
validate_config_version(config_details.config_files)
processed_files = [
- process_config_file(config_file, config_details.environment)
+ process_config_file(config_file, config_details.environment, interpolate=interpolate)
for config_file in config_details.config_files
]
config_details = config_details._replace(config_files=processed_files)
@@ -503,7 +505,6 @@ def load_services(config_details, config_file, compatibility=False):
def interpolate_config_section(config_file, config, section, environment):
- validate_config_section(config_file.filename, config, section)
return interpolate_environment_variables(
config_file.version,
config,
@@ -512,38 +513,60 @@ def interpolate_config_section(config_file, config, section, environment):
)
-def process_config_file(config_file, environment, service_name=None):
- services = interpolate_config_section(
+def process_config_section(config_file, config, section, environment, interpolate):
+ validate_config_section(config_file.filename, config, section)
+ if interpolate:
+ return interpolate_environment_variables(
+ config_file.version,
+ config,
+ section,
+ environment
+ )
+ else:
+ return config
+
+
+def process_config_file(config_file, environment, service_name=None, interpolate=True):
+ services = process_config_section(
config_file,
config_file.get_service_dicts(),
'service',
- environment)
+ environment,
+ interpolate,
+ )
if config_file.version > V1:
processed_config = dict(config_file.config)
processed_config['services'] = services
- processed_config['volumes'] = interpolate_config_section(
+ processed_config['volumes'] = process_config_section(
config_file,
config_file.get_volumes(),
'volume',
- environment)
- processed_config['networks'] = interpolate_config_section(
+ environment,
+ interpolate,
+ )
+ processed_config['networks'] = process_config_section(
config_file,
config_file.get_networks(),
'network',
- environment)
+ environment,
+ interpolate,
+ )
if config_file.version >= const.COMPOSEFILE_V3_1:
- processed_config['secrets'] = interpolate_config_section(
+ processed_config['secrets'] = process_config_section(
config_file,
config_file.get_secrets(),
'secret',
- environment)
+ environment,
+ interpolate,
+ )
if config_file.version >= const.COMPOSEFILE_V3_3:
- processed_config['configs'] = interpolate_config_section(
+ processed_config['configs'] = process_config_section(
config_file,
config_file.get_configs(),
'config',
- environment
+ environment,
+ interpolate,
)
else:
processed_config = services
@@ -592,7 +615,7 @@ class ServiceExtendsResolver(object):
config_path = self.get_extended_config_path(extends)
service_name = extends['service']
- if config_path == self.config_file.filename:
+ if config_path == os.path.abspath(self.config_file.filename):
try:
service_config = self.config_file.get_service(service_name)
except KeyError:
@@ -704,6 +727,7 @@ def validate_service(service_config, service_names, config_file):
validate_depends_on(service_config, service_names)
validate_links(service_config, service_names)
validate_healthcheck(service_config)
+ validate_credential_spec(service_config)
if not service_dict.get('image') and has_uppercase(service_name):
raise ConfigurationError(
@@ -834,6 +858,17 @@ def finalize_service_volumes(service_dict, environment):
finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
else:
finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))
+
+ duplicate_mounts = []
+ mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
+ for mount in mounts:
+ if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1:
+ duplicate_mounts.append(mount.repr())
+
+ if duplicate_mounts:
+ raise ConfigurationError("Duplicate mount points: [%s]" % (
+ ', '.join(duplicate_mounts)))
+
service_dict['volumes'] = finalized_volumes
return service_dict
@@ -881,11 +916,12 @@ def finalize_service(service_config, service_names, version, environment, compat
normalize_build(service_dict, service_config.working_dir, environment)
if compatibility:
+ service_dict = translate_credential_spec_to_security_opt(service_dict)
service_dict, ignored_keys = translate_deploy_keys_to_container_config(
service_dict
)
if ignored_keys:
- log.warn(
+ log.warning(
'The following deploy sub-keys are not supported in compatibility mode and have'
' been ignored: {}'.format(', '.join(ignored_keys))
)
@@ -917,13 +953,37 @@ def convert_restart_policy(name):
raise ConfigurationError('Invalid restart policy "{}"'.format(name))
+def convert_credential_spec_to_security_opt(credential_spec):
+ if 'file' in credential_spec:
+ return 'file://{file}'.format(file=credential_spec['file'])
+ return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+ result = []
+
+ if 'credential_spec' in service_dict:
+ spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+ result.append('credentialspec={spec}'.format(spec=spec))
+
+ if result:
+ service_dict['security_opt'] = result
+
+ return service_dict
+
+
def translate_deploy_keys_to_container_config(service_dict):
+ if 'credential_spec' in service_dict:
+ del service_dict['credential_spec']
+ if 'configs' in service_dict:
+ del service_dict['configs']
+
if 'deploy' not in service_dict:
return service_dict, []
deploy_dict = service_dict['deploy']
ignored_keys = [
- k for k in ['endpoint_mode', 'labels', 'update_config', 'placement']
+ k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config', 'placement']
if k in deploy_dict
]
@@ -946,10 +1006,6 @@ def translate_deploy_keys_to_container_config(service_dict):
)
del service_dict['deploy']
- if 'credential_spec' in service_dict:
- del service_dict['credential_spec']
- if 'configs' in service_dict:
- del service_dict['configs']
return service_dict, ignored_keys
@@ -1038,15 +1094,16 @@ def merge_service_dicts(base, override, version):
md.merge_mapping('environment', parse_environment)
md.merge_mapping('labels', parse_labels)
md.merge_mapping('ulimits', parse_flat_dict)
- md.merge_mapping('networks', parse_networks)
md.merge_mapping('sysctls', parse_sysctls)
md.merge_mapping('depends_on', parse_depends_on)
+ md.merge_mapping('storage_opt', parse_flat_dict)
md.merge_sequence('links', ServiceLink.parse)
md.merge_sequence('secrets', types.ServiceSecret.parse)
md.merge_sequence('configs', types.ServiceConfig.parse)
md.merge_sequence('security_opt', types.SecurityOpt.parse)
md.merge_mapping('extra_hosts', parse_extra_hosts)
+ md.merge_field('networks', merge_networks, default={})
for field in ['volumes', 'devices']:
md.merge_field(field, merge_path_mappings)
@@ -1135,6 +1192,7 @@ def merge_deploy(base, override):
md.merge_scalar('replicas')
md.merge_mapping('labels', parse_labels)
md.merge_mapping('update_config')
+ md.merge_mapping('rollback_config')
md.merge_mapping('restart_policy')
if md.needs_merge('resources'):
resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {})
@@ -1150,6 +1208,22 @@ def merge_deploy(base, override):
return dict(md)
+def merge_networks(base, override):
+ merged_networks = {}
+ all_network_names = set(base) | set(override)
+ base = {k: {} for k in base} if isinstance(base, list) else base
+ override = {k: {} for k in override} if isinstance(override, list) else override
+ for network_name in all_network_names:
+ md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
+ md.merge_field('aliases', merge_unique_items_lists, [])
+ md.merge_field('link_local_ips', merge_unique_items_lists, [])
+ md.merge_scalar('priority')
+ md.merge_scalar('ipv4_address')
+ md.merge_scalar('ipv6_address')
+ merged_networks[network_name] = dict(md)
+ return merged_networks
+
+
def merge_reservations(base, override):
md = MergeDict(base, override)
md.merge_scalar('cpus')
@@ -1279,7 +1353,7 @@ def resolve_volume_paths(working_dir, service_dict):
def resolve_volume_path(working_dir, volume):
if isinstance(volume, dict):
- if volume.get('source', '').startswith('.') and volume['type'] == 'bind':
+ if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind':
volume['source'] = expand_path(working_dir, volume['source'])
return volume
@@ -1434,15 +1508,15 @@ def has_uppercase(name):
return any(char in string.ascii_uppercase for char in name)
-def load_yaml(filename, encoding=None):
+def load_yaml(filename, encoding=None, binary=True):
try:
- with io.open(filename, 'r', encoding=encoding) as fh:
+ with io.open(filename, 'rb' if binary else 'r', encoding=encoding) as fh:
return yaml.safe_load(fh)
except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
if encoding is None:
# Sometimes the user's locale sets an encoding that doesn't match
# the YAML files. In such cases, retry once with the "default"
# UTF-8 encoding
- return load_yaml(filename, encoding='utf-8')
+ return load_yaml(filename, encoding='utf-8-sig', binary=False)
error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
raise ConfigurationError(u"{}: {}".format(error_name, e))
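
Of the merge changes above, merge_networks does the most work: it normalizes
the list and mapping forms of "networks" before merging, so an override file
can attach settings to a network the base file only names. A simplified,
self-contained sketch (the real version uses compose's MergeDict and also
merges link_local_ips and priority):

    def merge_networks(base, override):
        # Normalize the short list form into the mapping form
        base = {k: {} for k in base} if isinstance(base, list) else base
        override = {k: {} for k in override} if isinstance(override, list) else override
        merged = {}
        for name in set(base) | set(override):
            net = dict(base.get(name) or {})
            for key, value in (override.get(name) or {}).items():
                if key == 'aliases':        # unique-items list merge
                    net[key] = sorted(set(net.get(key, [])) | set(value))
                else:                       # scalars: override wins
                    net[key] = value
            merged[name] = net
        return merged

    # merge_networks(['back'], {'back': {'ipv4_address': '172.16.0.10'}})
    # -> {'back': {'ipv4_address': '172.16.0.10'}}
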
diff --git a/compose/config/config_schema_v2.0.json b/compose/config/config_schema_v2.0.json
index eddf787e..419f2e28 100644
--- a/compose/config/config_schema_v2.0.json
+++ b/compose/config/config_schema_v2.0.json
@@ -281,7 +281,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -305,6 +306,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.1.json b/compose/config/config_schema_v2.1.json
index 5ad5a20e..3cb1ee21 100644
--- a/compose/config/config_schema_v2.1.json
+++ b/compose/config/config_schema_v2.1.json
@@ -332,7 +332,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -359,6 +360,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.2.json b/compose/config/config_schema_v2.2.json
index 26044b65..8e1f288b 100644
--- a/compose/config/config_schema_v2.2.json
+++ b/compose/config/config_schema_v2.2.json
@@ -341,7 +341,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -368,6 +369,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.3.json b/compose/config/config_schema_v2.3.json
index ac0778f2..659dbcd1 100644
--- a/compose/config/config_schema_v2.3.json
+++ b/compose/config/config_schema_v2.3.json
@@ -385,7 +385,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -412,6 +413,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.4.json b/compose/config/config_schema_v2.4.json
index 731fa2f9..4e641788 100644
--- a/compose/config/config_schema_v2.4.json
+++ b/compose/config/config_schema_v2.4.json
@@ -346,6 +346,7 @@
"dependencies": {
"memswap_limit": ["mem_limit"]
},
+ "patternProperties": {"^x-": {}},
"additionalProperties": false
},
@@ -384,7 +385,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -408,6 +410,25 @@
"labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
"additionalProperties": false
},
@@ -432,6 +453,7 @@
"labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
+ "patternProperties": {"^x-": {}},
"additionalProperties": false
},
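
The v2.4-specific change here is the patternProperties {"^x-": {}} entries:
services, networks and volumes may now carry vendor extension keys prefixed
with x- without tripping additionalProperties. Sketched with jsonschema:

    from jsonschema import Draft4Validator

    service_fragment = {
        "type": "object",
        "properties": {"image": {"type": "string"}},
        "patternProperties": {"^x-": {}},    # any x-* key, any value
        "additionalProperties": False,
    }

    Draft4Validator(service_fragment).validate(
        {"image": "redis", "x-owner": "platform-team"})   # passes
    # {"image": "redis", "owner": "platform-team"} raises ValidationError
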
diff --git a/compose/config/config_schema_v3.7.json b/compose/config/config_schema_v3.7.json
new file mode 100644
index 00000000..cd7882f5
--- /dev/null
+++ b/compose/config/config_schema_v3.7.json
@@ -0,0 +1,602 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.7.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": "boolean"},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ },
+ "tmpfs": {
+ "type": "object",
+ "properties": {
+ "size": {
+ "type": "integer",
+ "minimum": 0
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string", "format": "duration"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "rollback_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "reservations": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"},
+ "generic_resources": {"$ref": "#/definitions/generic_resources"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "generic_resources": {
+ "id": "#/definitions/generic_resources",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "discrete_resource_spec": {
+ "type": "object",
+ "properties": {
+ "kind": {"type": "string"},
+ "value": {"type": "number"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
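
The schema above is consumed through the jsonschema package. Below is a minimal sketch of validating a document against it (the file path is illustrative); Compose's own entry point in compose/config/validation.py additionally registers checkers for custom formats such as "ports" and "expose", which plain jsonschema skips.

    import json

    from jsonschema import Draft4Validator

    # Path is illustrative; the schema ships inside the compose package.
    with open('compose/config/config_schema_v3.7.json') as f:
        schema = json.load(f)

    document = {
        'version': '3.7',
        'services': {'web': {'image': 'nginx:alpine', 'ports': ['80:80']}},
    }

    # iter_errors yields nothing for a valid document; custom formats like
    # "ports" are ignored here because no FormatChecker is supplied.
    for error in Draft4Validator(schema).iter_errors(document):
        print(error.message)
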
diff --git a/compose/config/environment.py b/compose/config/environment.py
index 0087b612..696356f3 100644
--- a/compose/config/environment.py
+++ b/compose/config/environment.py
@@ -5,11 +5,13 @@ import codecs
import contextlib
import logging
import os
+import re
import six
from ..const import IS_WINDOWS_PLATFORM
from .errors import ConfigurationError
+from .errors import EnvFileNotFound
log = logging.getLogger(__name__)
@@ -17,10 +19,16 @@ log = logging.getLogger(__name__)
def split_env(env):
if isinstance(env, six.binary_type):
env = env.decode('utf-8', 'replace')
+ key = value = None
if '=' in env:
- return env.split('=', 1)
+ key, value = env.split('=', 1)
else:
- return env, None
+ key = env
+ if re.search(r'\s', key):
+ raise ConfigurationError(
+ "environment variable name '{}' may not contain whitespace.".format(key)
+ )
+ return key, value
def env_vars_from_file(filename):
@@ -28,16 +36,19 @@ def env_vars_from_file(filename):
Read in a line delimited file of environment variables.
"""
if not os.path.exists(filename):
- raise ConfigurationError("Couldn't find env file: %s" % filename)
+ raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
elif not os.path.isfile(filename):
- raise ConfigurationError("%s is not a file." % (filename))
+ raise EnvFileNotFound("{} is not a file.".format(filename))
env = {}
with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
for line in fileobj:
line = line.strip()
if line and not line.startswith('#'):
- k, v = split_env(line)
- env[k] = v
+ try:
+ k, v = split_env(line)
+ env[k] = v
+ except ConfigurationError as e:
+ raise ConfigurationError('In file {}: {}'.format(filename, e.msg))
return env
@@ -45,19 +56,24 @@ class Environment(dict):
def __init__(self, *args, **kwargs):
super(Environment, self).__init__(*args, **kwargs)
self.missing_keys = []
+ self.silent = False
@classmethod
- def from_env_file(cls, base_dir):
+ def from_env_file(cls, base_dir, env_file=None):
def _initialize():
result = cls()
if base_dir is None:
return result
- env_file_path = os.path.join(base_dir, '.env')
+ if env_file:
+ env_file_path = os.path.join(base_dir, env_file)
+ else:
+ env_file_path = os.path.join(base_dir, '.env')
try:
return cls(env_vars_from_file(env_file_path))
- except ConfigurationError:
+ except EnvFileNotFound:
pass
return result
+
instance = _initialize()
instance.update(os.environ)
return instance
@@ -83,8 +99,8 @@ class Environment(dict):
return super(Environment, self).__getitem__(key.upper())
except KeyError:
pass
- if key not in self.missing_keys:
- log.warn(
+ if not self.silent and key not in self.missing_keys:
+ log.warning(
"The {} variable is not set. Defaulting to a blank string."
.format(key)
)
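
A sketch of the tightened split_env behavior above: values are split on the first '=', bare names map to None, and a name containing whitespace is now rejected rather than silently accepted.

    from compose.config.environment import split_env
    from compose.config.errors import ConfigurationError

    assert split_env('FOO=bar=baz') == ('FOO', 'bar=baz')  # split on first '=' only
    assert split_env('FOO') == ('FOO', None)               # bare name, no value

    try:
        split_env('FO O=bar')
    except ConfigurationError as e:
        print(e.msg)  # environment variable name 'FO O' may not contain whitespace.
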
diff --git a/compose/config/errors.py b/compose/config/errors.py
index f5c03808..9b2078f2 100644
--- a/compose/config/errors.py
+++ b/compose/config/errors.py
@@ -19,6 +19,10 @@ class ConfigurationError(Exception):
return self.msg
+class EnvFileNotFound(ConfigurationError):
+ pass
+
+
class DependencyError(ConfigurationError):
pass
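
Because EnvFileNotFound subclasses ConfigurationError, Environment.from_env_file (above) can treat a missing env file as optional while genuine parse errors still propagate; a sketch:

    from compose.config.environment import env_vars_from_file
    from compose.config.errors import EnvFileNotFound

    try:
        env = env_vars_from_file('.env')
    except EnvFileNotFound:
        env = {}  # a missing default .env is not fatal; malformed lines still raise
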
diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py
index 8845d73b..18be8562 100644
--- a/compose/config/interpolation.py
+++ b/compose/config/interpolation.py
@@ -48,7 +48,7 @@ def interpolate_environment_variables(version, config, section, environment):
def get_config_path(config_key, section, name):
- return '{}.{}.{}'.format(section, name, config_key)
+ return '{}/{}/{}'.format(section, name, config_key)
def interpolate_value(name, config_key, value, section, interpolator):
@@ -64,18 +64,18 @@ def interpolate_value(name, config_key, value, section, interpolator):
string=e.string))
except UnsetRequiredSubstitution as e:
raise ConfigurationError(
- 'Missing mandatory value for "{config_key}" option in {section} "{name}": {err}'.format(
- config_key=config_key,
- name=name,
- section=section,
- err=e.err
- )
+ 'Missing mandatory value for "{config_key}" option interpolating {value} '
+ 'in {section} "{name}": {err}'.format(config_key=config_key,
+ value=value,
+ name=name,
+ section=section,
+ err=e.err)
)
def recursive_interpolate(obj, interpolator, config_path):
def append(config_path, key):
- return '{}.{}'.format(config_path, key)
+ return '{}/{}'.format(config_path, key)
if isinstance(obj, six.string_types):
return converter.convert(config_path, interpolator.interpolate(obj))
@@ -160,12 +160,12 @@ class UnsetRequiredSubstitution(Exception):
self.err = custom_err_msg
-PATH_JOKER = '[^.]+'
+PATH_JOKER = '[^/]+'
FULL_JOKER = '.+'
def re_path(*args):
- return re.compile('^{}$'.format('\.'.join(args)))
+ return re.compile('^{}$'.format('/'.join(args)))
def re_path_basic(section, name):
@@ -248,6 +248,8 @@ class ConversionMap(object):
service_path('deploy', 'replicas'): to_int,
service_path('deploy', 'update_config', 'parallelism'): to_int,
service_path('deploy', 'update_config', 'max_failure_ratio'): to_float,
+ service_path('deploy', 'rollback_config', 'parallelism'): to_int,
+ service_path('deploy', 'rollback_config', 'max_failure_ratio'): to_float,
service_path('deploy', 'restart_policy', 'max_attempts'): to_int,
service_path('mem_swappiness'): to_int,
service_path('labels', FULL_JOKER): to_str,
@@ -286,7 +288,7 @@ class ConversionMap(object):
except ValueError as e:
raise ConfigurationError(
'Error while attempting to convert {} to appropriate type: {}'.format(
- path, e
+ path.replace('/', '.'), e
)
)
return value
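
The separator change from '.' to '/' exists because service names may themselves contain dots; with '/', a dotted name stays a single path segment for the PATH_JOKER matchers. A sketch mirroring the module's re_path helper:

    import re

    PATH_JOKER = '[^/]+'

    def re_path(*args):
        return re.compile('^{}$'.format('/'.join(args)))

    pattern = re_path('service', PATH_JOKER, 'deploy/replicas')
    assert pattern.match('service/my.web/deploy/replicas')    # dotted name: one segment
    assert not pattern.match('service/a/b/deploy/replicas')   # '/' still separates keys
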
diff --git a/compose/config/serialize.py b/compose/config/serialize.py
index c0cf35c1..5776ce95 100644
--- a/compose/config/serialize.py
+++ b/compose/config/serialize.py
@@ -24,14 +24,12 @@ def serialize_dict_type(dumper, data):
def serialize_string(dumper, data):
- """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ """ Ensure boolean-like strings are quoted in the output """
representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
if isinstance(data, six.binary_type):
data = data.decode('utf-8')
- data = data.replace('$', '$$')
-
if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
# Empirically only y/n appears to be an issue, but this might change
# depending on which PyYaml version is being used. Err on safe side.
@@ -39,6 +37,12 @@ def serialize_string(dumper, data):
return representer(data)
+def serialize_string_escape_dollar(dumper, data):
+ """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ data = data.replace('$', '$$')
+ return serialize_string(dumper, data)
+
+
yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
@@ -46,8 +50,6 @@ yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
-yaml.SafeDumper.add_representer(str, serialize_string)
-yaml.SafeDumper.add_representer(six.text_type, serialize_string)
def denormalize_config(config, image_digests=None):
@@ -78,7 +80,11 @@ def denormalize_config(config, image_digests=None):
config.version >= V3_0 and config.version < v3_introduced_name_key(key)):
del conf['name']
elif 'external' in conf:
- conf['external'] = True
+ conf['external'] = bool(conf['external'])
+
+ if 'attachable' in conf and config.version < V3_2:
+ # For compatibility mode, this option is invalid in v2
+ del conf['attachable']
return result
@@ -89,7 +95,13 @@ def v3_introduced_name_key(key):
return V3_5
-def serialize_config(config, image_digests=None):
+def serialize_config(config, image_digests=None, escape_dollar=True):
+ if escape_dollar:
+ yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
+ yaml.SafeDumper.add_representer(six.text_type, serialize_string_escape_dollar)
+ else:
+ yaml.SafeDumper.add_representer(str, serialize_string)
+ yaml.SafeDumper.add_representer(six.text_type, serialize_string)
return yaml.safe_dump(
denormalize_config(config, image_digests),
default_flow_style=False,
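
Moving the '$' escaping into its own representer makes it selectable per call. A sketch, assuming config is a compose.config.config.Config instance; the escape_dollar=False path is what a non-interpolating `docker-compose config` output relies on:

    from compose.config.serialize import serialize_config

    # Default: '$HOME' is written as '$$HOME', so re-reading the output does
    # not trigger variable interpolation a second time.
    escaped = serialize_config(config, image_digests=None, escape_dollar=True)

    # Opt-out: '$HOME' survives verbatim in the dumped YAML.
    raw = serialize_config(config, image_digests=None, escape_dollar=False)
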
diff --git a/compose/config/types.py b/compose/config/types.py
index ff987521..ab8f34e3 100644
--- a/compose/config/types.py
+++ b/compose/config/types.py
@@ -125,7 +125,7 @@ def parse_extra_hosts(extra_hosts_config):
def normalize_path_for_engine(path):
- """Windows paths, c:\my\path\shiny, need to be changed to be compatible with
+ """Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
drive, tail = splitdrive(path)
@@ -136,6 +136,20 @@ def normalize_path_for_engine(path):
return path.replace('\\', '/')
+def normpath(path, win_host=False):
+ """ Custom path normalizer that handles Compose-specific edge cases like
+ UNIX paths on Windows hosts and vice-versa. """
+
+ sysnorm = ntpath.normpath if win_host else os.path.normpath
+ # If a path looks like a UNIX absolute path on Windows, it probably is;
+ # we'll need to revert the backslashes to forward slashes after normalization
+ flip_slashes = path.startswith('/') and IS_WINDOWS_PLATFORM
+ path = sysnorm(path)
+ if flip_slashes:
+ path = path.replace('\\', '/')
+ return path
+
+
class MountSpec(object):
options_map = {
'volume': {
@@ -152,12 +166,11 @@ class MountSpec(object):
@classmethod
def parse(cls, mount_dict, normalize=False, win_host=False):
- normpath = ntpath.normpath if win_host else os.path.normpath
if mount_dict.get('source'):
if mount_dict['type'] == 'tmpfs':
raise ConfigurationError('tmpfs mounts can not specify a source')
- mount_dict['source'] = normpath(mount_dict['source'])
+ mount_dict['source'] = normpath(mount_dict['source'], win_host)
if normalize:
mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])
@@ -247,7 +260,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
else:
external = parts[0]
parts = separate_next_section(parts[1])
- external = ntpath.normpath(external)
+ external = normpath(external, True)
internal = parts[0]
if len(parts) > 1:
if ':' in parts[1]:
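
A sketch of the edge cases the new normpath above covers:

    from compose.config.types import normpath

    # win_host=True forces ntpath semantics even when Compose itself is not
    # running on Windows (e.g. a Linux client driving a Windows engine).
    assert normpath(r'C:\foo\..\bar', win_host=True) == r'C:\bar'

    # On a Windows platform, a source that starts with '/' is assumed to be a
    # deliberate UNIX-style path, so the backslashes ntpath.normpath
    # introduces are flipped back to forward slashes afterwards.
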
diff --git a/compose/config/validation.py b/compose/config/validation.py
index 0fdcb37e..1cceb71f 100644
--- a/compose/config/validation.py
+++ b/compose/config/validation.py
@@ -41,15 +41,15 @@ DOCKER_CONFIG_HINTS = {
}
-VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
+VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
-VALID_IPV4_ADDR = "({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
-VALID_REGEX_IPV4_CIDR = "^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
+VALID_IPV4_ADDR = r"({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
+VALID_REGEX_IPV4_CIDR = r"^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}'
-VALID_REGEX_IPV6_CIDR = "".join("""
+VALID_REGEX_IPV6_CIDR = "".join(r"""
^
(
(({IPV6_SEG}:){{7}}{IPV6_SEG})|
@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
)
+def validate_credential_spec(service_config):
+ credential_spec = service_config.config.get('credential_spec')
+ if not credential_spec:
+ return
+
+ if 'registry' not in credential_spec and 'file' not in credential_spec:
+ raise ConfigurationError(
+ "Service '{s.name}' is missing 'credential_spec.file' or "
+ "credential_spec.registry'".format(s=service_config)
+ )
+
+
def get_unsupported_config_msg(path, error_key):
msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
if error_key in DOCKER_CONFIG_HINTS:
@@ -330,7 +342,10 @@ def handle_generic_error(error, path):
def parse_key_from_error_msg(error):
- return error.message.split("'")[1]
+ try:
+ return error.message.split("'")[1]
+ except IndexError:
+ return error.message.split('(')[1].split(' ')[0].strip("'")
def path_string(path):
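
validate_credential_spec only fires for a non-empty credential_spec that names neither source; a runnable sketch (the namedtuple stands in for the richer ServiceConfig object the real caller passes):

    from collections import namedtuple

    from compose.config.errors import ConfigurationError
    from compose.config.validation import validate_credential_spec

    FakeServiceConfig = namedtuple('FakeServiceConfig', 'name config')

    validate_credential_spec(FakeServiceConfig('web', {}))  # no spec at all: no-op
    validate_credential_spec(
        FakeServiceConfig('web', {'credential_spec': {'file': 'spec.json'}}))  # valid

    try:
        validate_credential_spec(
            FakeServiceConfig('web', {'credential_spec': {'somekey': 'x'}}))
    except ConfigurationError as e:
        print(e.msg)  # missing 'credential_spec.file' or 'credential_spec.registry'
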
diff --git a/compose/const.py b/compose/const.py
index 200a458a..ab0389ce 100644
--- a/compose/const.py
+++ b/compose/const.py
@@ -7,20 +7,24 @@ from .version import ComposeVersion
DEFAULT_TIMEOUT = 10
HTTP_TIMEOUT = 60
-IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
LABEL_PROJECT = 'com.docker.compose.project'
+LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir'
+LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files'
+LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file'
LABEL_SERVICE = 'com.docker.compose.service'
LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
+LABEL_SLUG = 'com.docker.compose.slug'
LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
NANOCPUS_SCALE = 1000000000
PARALLEL_LIMIT = 64
SECRETS_PATH = '/run/secrets'
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
COMPOSEFILE_V1 = ComposeVersion('1')
COMPOSEFILE_V2_0 = ComposeVersion('2.0')
@@ -36,6 +40,7 @@ COMPOSEFILE_V3_3 = ComposeVersion('3.3')
COMPOSEFILE_V3_4 = ComposeVersion('3.4')
COMPOSEFILE_V3_5 = ComposeVersion('3.5')
COMPOSEFILE_V3_6 = ComposeVersion('3.6')
+COMPOSEFILE_V3_7 = ComposeVersion('3.7')
API_VERSIONS = {
COMPOSEFILE_V1: '1.21',
@@ -51,6 +56,7 @@ API_VERSIONS = {
COMPOSEFILE_V3_4: '1.30',
COMPOSEFILE_V3_5: '1.30',
COMPOSEFILE_V3_6: '1.36',
+ COMPOSEFILE_V3_7: '1.38',
}
API_VERSION_TO_ENGINE_VERSION = {
@@ -67,4 +73,5 @@ API_VERSION_TO_ENGINE_VERSION = {
API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
API_VERSIONS[COMPOSEFILE_V3_6]: '18.02.0',
+ API_VERSIONS[COMPOSEFILE_V3_7]: '18.06.0',
}
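
The two tables above chain together: a file-format version selects the API version Compose requests, which in turn maps to the minimum engine release that serves it. A sketch:

    from compose.const import (
        API_VERSION_TO_ENGINE_VERSION,
        API_VERSIONS,
        COMPOSEFILE_V3_7,
    )

    api = API_VERSIONS[COMPOSEFILE_V3_7]          # '1.38'
    engine = API_VERSION_TO_ENGINE_VERSION[api]   # '18.06.0'
    print('format 3.7 -> API {} -> engine >= {}'.format(api, engine))
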
diff --git a/compose/container.py b/compose/container.py
index 0c2ca990..8a2fb240 100644
--- a/compose/container.py
+++ b/compose/container.py
@@ -7,8 +7,13 @@ import six
from docker.errors import ImageNotFound
from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
+from .const import LABEL_VERSION
+from .utils import truncate_id
+from .version import ComposeVersion
class Container(object):
@@ -78,12 +83,16 @@ class Container(object):
@property
def name_without_project(self):
if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
- return '{0}_{1}'.format(self.service, self.number)
+ return '{0}_{1}'.format(self.service, self.number if self.number is not None else self.slug)
else:
return self.name
@property
def number(self):
+ if self.one_off:
+ # One-off containers are no longer assigned numbers and use slugs instead.
+ return None
+
number = self.labels.get(LABEL_CONTAINER_NUMBER)
if not number:
raise ValueError("Container {0} does not have a {1} label".format(
@@ -91,6 +100,20 @@ class Container(object):
return int(number)
@property
+ def slug(self):
+ if not self.full_slug:
+ return None
+ return truncate_id(self.full_slug)
+
+ @property
+ def full_slug(self):
+ return self.labels.get(LABEL_SLUG)
+
+ @property
+ def one_off(self):
+ return self.labels.get(LABEL_ONE_OFF) == 'True'
+
+ @property
def ports(self):
self.inspect_if_not_inspected()
return self.get('NetworkSettings.Ports') or {}
@@ -283,6 +306,12 @@ class Container(object):
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
+ def has_legacy_proj_name(self, project_name):
+ return (
+ ComposeVersion(self.labels.get(LABEL_VERSION)) < ComposeVersion('1.21.0') and
+ self.project != project_name
+ )
+
def __repr__(self):
return '<Container: %s (%s)>' % (self.name, self.id[:6])
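
One-off containers now get a random slug label instead of a sequential number; truncate_id (imported above) shortens it the way Docker abbreviates object IDs. A sketch with an illustrative label value, assuming truncate_id keeps the first 12 characters:

    from compose.utils import truncate_id

    full_slug = '1f8c8e4e0bd84c4fbb5a7a6d3f2a9c11'   # illustrative LABEL_SLUG value
    short = truncate_id(full_slug)                   # assumed: '1f8c8e4e0bd8'

    # name_without_project then renders 'web_1' for a numbered container but
    # 'web_' + short for a one-off, whose number property is None.
    print(short)
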
diff --git a/compose/network.py b/compose/network.py
index 1a080c40..84531ecc 100644
--- a/compose/network.py
+++ b/compose/network.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import re
from collections import OrderedDict
from docker.errors import NotFound
@@ -10,9 +11,11 @@ from docker.types import IPAMPool
from docker.utils import version_gte
from docker.utils import version_lt
+from . import __version__
from .config import ConfigurationError
from .const import LABEL_NETWORK
from .const import LABEL_PROJECT
+from .const import LABEL_VERSION
log = logging.getLogger(__name__)
@@ -39,6 +42,7 @@ class Network(object):
self.enable_ipv6 = enable_ipv6
self.labels = labels
self.custom_name = custom_name
+ self.legacy = None
def ensure(self):
if self.external:
@@ -64,8 +68,9 @@ class Network(object):
)
return
+ self._set_legacy_flag()
try:
- data = self.inspect()
+ data = self.inspect(legacy=self.legacy)
check_remote_network_config(data, self)
except NotFound:
driver_name = 'the default driver'
@@ -73,8 +78,7 @@ class Network(object):
driver_name = 'driver "{}"'.format(self.driver)
log.info(
- 'Creating network "{}" with {}'
- .format(self.full_name, driver_name)
+ 'Creating network "{}" with {}'.format(self.full_name, driver_name)
)
self.client.create_network(
@@ -91,22 +95,39 @@ class Network(object):
def remove(self):
if self.external:
- log.info("Network %s is external, skipping", self.full_name)
+ log.info("Network %s is external, skipping", self.true_name)
return
- log.info("Removing network {}".format(self.full_name))
- self.client.remove_network(self.full_name)
+ log.info("Removing network {}".format(self.true_name))
+ self.client.remove_network(self.true_name)
- def inspect(self):
+ def inspect(self, legacy=False):
+ if legacy:
+ return self.client.inspect_network(self.legacy_full_name)
return self.client.inspect_network(self.full_name)
@property
+ def legacy_full_name(self):
+ if self.custom_name:
+ return self.name
+ return '{0}_{1}'.format(
+ re.sub(r'[_-]', '', self.project), self.name
+ )
+
+ @property
def full_name(self):
if self.custom_name:
return self.name
return '{0}_{1}'.format(self.project, self.name)
@property
+ def true_name(self):
+ self._set_legacy_flag()
+ if self.legacy:
+ return self.legacy_full_name
+ return self.full_name
+
+ @property
def _labels(self):
if version_lt(self.client._version, '1.23'):
return None
@@ -114,9 +135,19 @@ class Network(object):
labels.update({
LABEL_PROJECT: self.project,
LABEL_NETWORK: self.name,
+ LABEL_VERSION: __version__,
})
return labels
+ def _set_legacy_flag(self):
+ if self.legacy is not None:
+ return
+ try:
+ data = self.inspect(legacy=True)
+ self.legacy = data is not None
+ except NotFound:
+ self.legacy = False
+
def create_ipam_config_from_dict(ipam_dict):
if not ipam_dict:
@@ -150,59 +181,59 @@ def check_remote_ipam_config(remote, local):
remote_ipam = remote.get('IPAM')
ipam_dict = create_ipam_config_from_dict(local.ipam)
if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM driver')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM driver')
if len(ipam_dict['Config']) != 0:
if len(ipam_dict['Config']) != len(remote_ipam['Config']):
- raise NetworkConfigChangedError(local.full_name, 'IPAM configs')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM configs')
remote_configs = sorted(remote_ipam['Config'], key=lambda cfg: cfg.get('Subnet', ''))
local_configs = sorted(ipam_dict['Config'], key=lambda cfg: cfg.get('Subnet', ''))
while local_configs:
lc = local_configs.pop()
rc = remote_configs.pop()
if lc.get('Subnet') != rc.get('Subnet'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config subnet')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config subnet')
if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config gateway')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config gateway')
if lc.get('IPRange') != rc.get('IPRange'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config ip_range')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config ip_range')
if sorted(lc.get('AuxiliaryAddresses') or []) != sorted(rc.get('AuxiliaryAddresses') or []):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config aux_addresses')
remote_opts = remote_ipam.get('Options') or {}
- local_opts = local.ipam.get('options') or {}
+ local_opts = local.ipam.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if remote_opts.get(k) != local_opts.get(k):
- raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
+ raise NetworkConfigChangedError(local.true_name, 'IPAM option "{}"'.format(k))
def check_remote_network_config(remote, local):
if local.driver and remote.get('Driver') != local.driver:
- raise NetworkConfigChangedError(local.full_name, 'driver')
+ raise NetworkConfigChangedError(local.true_name, 'driver')
local_opts = local.driver_opts or {}
remote_opts = remote.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if k in OPTS_EXCEPTIONS:
continue
if remote_opts.get(k) != local_opts.get(k):
- raise NetworkConfigChangedError(local.full_name, 'option "{}"'.format(k))
+ raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k))
if local.ipam is not None:
check_remote_ipam_config(remote, local)
if local.internal is not None and local.internal != remote.get('Internal', False):
- raise NetworkConfigChangedError(local.full_name, 'internal')
+ raise NetworkConfigChangedError(local.true_name, 'internal')
if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
- raise NetworkConfigChangedError(local.full_name, 'enable_ipv6')
+ raise NetworkConfigChangedError(local.true_name, 'enable_ipv6')
local_labels = local.labels or {}
- remote_labels = remote.get('Labels', {})
+ remote_labels = remote.get('Labels') or {}
for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
- log.warn(
+ log.warning(
'Network {}: label "{}" has changed. It may need to be'
- ' recreated.'.format(local.full_name, k)
+ ' recreated.'.format(local.true_name, k)
)
@@ -245,7 +276,7 @@ class ProjectNetworks(object):
}
unused = set(networks) - set(service_networks) - {'default'}
if unused:
- log.warn(
+ log.warning(
"Some networks were defined but are not used by any service: "
"{}".format(", ".join(unused)))
return cls(service_networks, use_networking)
@@ -257,7 +288,7 @@ class ProjectNetworks(object):
try:
network.remove()
except NotFound:
- log.warn("Network %s not found.", network.full_name)
+ log.warning("Network %s not found.", network.true_name)
def initialize(self):
if not self.use_networking:
@@ -286,13 +317,18 @@ def get_networks(service_dict, network_definitions):
for name, netdef in get_network_defs_for_service(service_dict).items():
network = network_definitions.get(name)
if network:
- networks[network.full_name] = netdef
+ networks[network.true_name] = netdef
else:
raise ConfigurationError(
'Service "{}" uses an undefined network "{}"'
.format(service_dict['name'], name))
- return OrderedDict(sorted(
- networks.items(),
- key=lambda t: t[1].get('priority') or 0, reverse=True
- ))
+ if any([v.get('priority') for v in networks.values()]):
+ return OrderedDict(sorted(
+ networks.items(),
+ key=lambda t: t[1].get('priority') or 0, reverse=True
+ ))
+ else:
+ # Ensure Compose will pick a consistent primary network if no
+ # priority is set
+ return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))
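
Compose releases before 1.21.0 stripped '-' and '_' from the project name when deriving network names; legacy_full_name reproduces that so existing networks are still found. A sketch of the two candidates true_name chooses between:

    import re

    project, name = 'my-app', 'backend'

    full_name = '{0}_{1}'.format(project, name)                    # 'my-app_backend'
    legacy = '{0}_{1}'.format(re.sub(r'[_-]', '', project), name)  # 'myapp_backend'

    # true_name probes the legacy name first (inspect(legacy=True)); only if
    # no such network exists does it fall back to the current naming scheme.
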
diff --git a/compose/parallel.py b/compose/parallel.py
index a2eb160e..e242a318 100644
--- a/compose/parallel.py
+++ b/compose/parallel.py
@@ -43,14 +43,17 @@ class GlobalLimit(object):
cls.global_limiter = Semaphore(value)
-def parallel_execute_watch(events, writer, errors, results, msg, get_name):
+def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
- writer.write(msg, get_name(obj), 'done', green)
+ if fail_check is not None and fail_check(obj):
+ writer.write(msg, get_name(obj), 'failed', red)
+ else:
+ writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
@@ -72,12 +75,14 @@ def parallel_execute_watch(events, writer, errors, results, msg, get_name):
return error_to_reraise
-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
+ fail_check is an additional failure check for cases that should display as a failure
+ in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
"""
objects = list(objects)
stream = get_output_stream(sys.stderr)
@@ -96,7 +101,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
errors = {}
results = []
- error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
+ error_to_reraise = parallel_execute_watch(
+ events, writer, errors, results, msg, get_name, fail_check
+ )
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
@@ -313,6 +320,13 @@ class ParallelStreamWriter(object):
self._write_ansi(msg, obj_index, color_func(status))
+def get_stream_writer():
+ instance = ParallelStreamWriter.instance
+ if instance is None:
+ raise RuntimeError('ParallelStreamWriter has not yet been instantiated')
+ return instance
+
+
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
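
fail_check lets an operation that returns normally still be rendered as 'failed'. A sketch of the wiring Project.start uses later in this diff, assuming services is a list of compose.service.Service objects already in scope:

    import operator

    from compose import parallel

    parallel.parallel_execute(
        services,
        lambda service: service.start(),                  # illustrative operation
        operator.attrgetter('name'),
        'Starting',
        fail_check=lambda service: not service.containers(),  # 0 containers: 'failed'
    )
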
diff --git a/compose/progress_stream.py b/compose/progress_stream.py
index 5e709770..c4281cb4 100644
--- a/compose/progress_stream.py
+++ b/compose/progress_stream.py
@@ -19,12 +19,11 @@ def write_to_stream(s, stream):
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
stream = utils.get_output_stream(stream)
- all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
- all_events.append(event)
+ yield event
is_progress_event = 'progress' in event or 'progressDetail' in event
if not is_progress_event:
@@ -57,8 +56,6 @@ def stream_output(output, stream):
stream.flush()
- return all_events
-
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
@@ -101,14 +98,14 @@ def print_output_event(event, stream, is_terminal):
def get_digest_from_pull(events):
+ digest = None
for event in events:
status = event.get('status')
if not status or 'Digest' not in status:
continue
-
- _, digest = status.split(':', 1)
- return digest.strip()
- return None
+ else:
+ digest = status.split(':', 1)[1].strip()
+ return digest
def get_digest_from_push(events):
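
With stream_output turned into a generator, progress is rendered as the events are consumed, and helpers like the rewritten get_digest_from_pull can share that single pass. A sketch, assuming output is a decoded docker-py pull stream:

    import sys

    from compose.progress_stream import get_digest_from_pull, stream_output

    events = stream_output(output, sys.stdout)  # nothing is printed until consumed

    # Draining the generator drives the rendering; piping it through
    # get_digest_from_pull also captures the last reported digest.
    digest = get_digest_from_pull(events)
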
diff --git a/compose/project.py b/compose/project.py
index 924390b4..094ce4d7 100644
--- a/compose/project.py
+++ b/compose/project.py
@@ -4,18 +4,20 @@ from __future__ import unicode_literals
import datetime
import logging
import operator
+import re
from functools import reduce
+from os import path
import enum
import six
from docker.errors import APIError
+from docker.utils import version_lt
from . import parallel
from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
from .config.sort_services import get_service_name_from_network_mode
-from .const import IMAGE_EVENTS
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
@@ -28,12 +30,13 @@ from .service import ContainerNetworkMode
from .service import ContainerPidMode
from .service import ConvergenceStrategy
from .service import NetworkMode
+from .service import parse_repository_tag
from .service import PidMode
from .service import Service
-from .service import ServiceName
from .service import ServiceNetworkMode
from .service import ServicePidMode
from .utils import microseconds_from_time_nano
+from .utils import truncate_string
from .volume import ProjectVolumes
@@ -70,14 +73,17 @@ class Project(object):
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
- def labels(self, one_off=OneOffFilter.exclude):
- labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
+ def labels(self, one_off=OneOffFilter.exclude, legacy=False):
+ name = self.name
+ if legacy:
+ name = re.sub(r'[_-]', '', name)
+ labels = ['{0}={1}'.format(LABEL_PROJECT, name)]
OneOffFilter.update_labels(one_off, labels)
return labels
@classmethod
- def from_config(cls, name, config_data, client, default_platform=None):
+ def from_config(cls, name, config_data, client, default_platform=None, extra_labels=[]):
"""
Construct a Project from a config.Config object.
"""
@@ -128,7 +134,9 @@ class Project(object):
volumes_from=volumes_from,
secrets=secrets,
pid_mode=pid_mode,
- platform=service_dict.pop('platform', default_platform),
+ platform=service_dict.pop('platform', None),
+ default_platform=default_platform,
+ extra_labels=extra_labels,
**service_dict)
)
@@ -193,25 +201,6 @@ class Project(object):
service.remove_duplicate_containers()
return services
- def get_scaled_services(self, services, scale_override):
- """
- Returns a list of this project's services as scaled ServiceName objects.
-
- services: a list of Service objects
- scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
- """
- service_names = []
- for service in services:
- if service.name in scale_override:
- scale = scale_override[service.name]
- else:
- scale = service.scale_num
-
- for i in range(1, scale + 1):
- service_names.append(ServiceName(self.name, service.name, i))
-
- return service_names
-
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
@@ -293,6 +282,7 @@ class Project(object):
operator.attrgetter('name'),
'Starting',
get_deps,
+ fail_check=lambda obj: not obj.containers(),
)
return containers
@@ -367,13 +357,45 @@ class Project(object):
return containers
def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
- build_args=None, gzip=False):
+ build_args=None, gzip=False, parallel_build=False, rm=True, silent=False, cli=False,
+ progress=None):
+
+ services = []
for service in self.get_services(service_names):
if service.can_be_built():
- service.build(no_cache, pull, force_rm, memory, build_args, gzip)
- else:
+ services.append(service)
+ elif not silent:
log.info('%s uses an image, skipping' % service.name)
+ if cli:
+ log.warning("Native build is an experimental feature and could change at any time")
+ if parallel_build:
+ log.warning("Flag '--parallel' is ignored when building with "
+ "COMPOSE_DOCKER_CLI_BUILD=1")
+ if gzip:
+ log.warning("Flag '--compress' is ignored when building with "
+ "COMPOSE_DOCKER_CLI_BUILD=1")
+
+ def build_service(service):
+ service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent, cli, progress)
+ if parallel_build:
+ _, errors = parallel.parallel_execute(
+ services,
+ build_service,
+ operator.attrgetter('name'),
+ 'Building',
+ limit=5,
+ )
+ if len(errors):
+ combined_errors = '\n'.join([
+ e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
+ ])
+ raise ProjectError(combined_errors)
+
+ else:
+ for service in services:
+ build_service(service)
+
def create(
self,
service_names=None,
@@ -392,11 +414,13 @@ class Project(object):
detached=True,
start=False)
- def events(self, service_names=None):
+ def _legacy_event_processor(self, service_names):
+ # Only for v1 files or when Compose is forced to use an older API version
def build_container_event(event, container):
time = datetime.datetime.fromtimestamp(event['time'])
time = time.replace(
- microsecond=microseconds_from_time_nano(event['timeNano']))
+ microsecond=microseconds_from_time_nano(event['timeNano'])
+ )
return {
'time': time,
'type': 'container',
@@ -415,17 +439,15 @@ class Project(object):
filters={'label': self.labels()},
decode=True
):
- # The first part of this condition is a guard against some events
- # broadcasted by swarm that don't have a status field.
+ # This is a guard against some events broadcasted by swarm that
+ # don't have a status field.
# See https://github.com/docker/compose/issues/3316
- if 'status' not in event or event['status'] in IMAGE_EVENTS:
- # We don't receive any image events because labels aren't applied
- # to images
+ if 'status' not in event:
continue
- # TODO: get labels from the API v1.22 , see github issue 2618
try:
- # this can fail if the container has been removed
+ # this can fail if the container has been removed or if the event
+ # refers to an image
container = Container.from_id(self.client, event['id'])
except APIError:
continue
@@ -433,6 +455,56 @@ class Project(object):
continue
yield build_container_event(event, container)
+ def events(self, service_names=None):
+ if version_lt(self.client.api_version, '1.22'):
+ # New, better event API was introduced in 1.22.
+ return self._legacy_event_processor(service_names)
+
+ def build_container_event(event):
+ container_attrs = event['Actor']['Attributes']
+ time = datetime.datetime.fromtimestamp(event['time'])
+ time = time.replace(
+ microsecond=microseconds_from_time_nano(event['timeNano'])
+ )
+
+ container = None
+ try:
+ container = Container.from_id(self.client, event['id'])
+ except APIError:
+ # Container may have been removed (e.g. if this is a destroy event)
+ pass
+
+ return {
+ 'time': time,
+ 'type': 'container',
+ 'action': event['status'],
+ 'id': event['Actor']['ID'],
+ 'service': container_attrs.get(LABEL_SERVICE),
+ 'attributes': dict([
+ (k, v) for k, v in container_attrs.items()
+ if not k.startswith('com.docker.compose.')
+ ]),
+ 'container': container,
+ }
+
+ def yield_loop(service_names):
+ for event in self.client.events(
+ filters={'label': self.labels()},
+ decode=True
+ ):
+ # TODO: support other event types
+ if event.get('Type') != 'container':
+ continue
+
+ try:
+ if event['Actor']['Attributes'][LABEL_SERVICE] not in service_names:
+ continue
+ except KeyError:
+ continue
+ yield build_container_event(event)
+
+ return yield_loop(set(service_names) if service_names else self.service_names)
+
def up(self,
service_names=None,
start_deps=True,
@@ -449,8 +521,12 @@ class Project(object):
reset_container_image=False,
renew_anonymous_volumes=False,
silent=False,
+ cli=False,
):
+ if cli:
+ log.warning("Native build is an experimental feature and could change at any time")
+
self.initialize()
if not ignore_orphans:
self.find_orphan_containers(remove_orphans)
@@ -463,10 +539,9 @@ class Project(object):
include_deps=start_deps)
for svc in services:
- svc.ensure_image_exists(do_build=do_build, silent=silent)
+ svc.ensure_image_exists(do_build=do_build, silent=silent, cli=cli)
plans = self._get_convergence_plans(
services, strategy, always_recreate_deps=always_recreate_deps)
- scaled_services = self.get_scaled_services(services, scale_override)
def do(service):
@@ -477,7 +552,6 @@ class Project(object):
scale_override=scale_override.get(service.name),
rescale=rescale,
start=start,
- project_services=scaled_services,
reset_container_image=reset_container_image,
renew_anonymous_volumes=renew_anonymous_volumes,
)
@@ -528,8 +602,10 @@ class Project(object):
", ".join(updated_dependencies))
containers_stopped = any(
service.containers(stopped=True, filters={'status': ['created', 'exited']}))
- has_links = any(c.get('HostConfig.Links') for c in service.containers())
- if always_recreate_deps or containers_stopped or not has_links:
+ service_has_links = any(service.get_link_names())
+ container_has_links = any(c.get('HostConfig.Links') for c in service.containers())
+ should_recreate_for_links = service_has_links ^ container_has_links
+ if always_recreate_deps or containers_stopped or should_recreate_for_links:
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
@@ -543,16 +619,38 @@ class Project(object):
def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
include_deps=False):
services = self.get_services(service_names, include_deps)
+ images_to_build = {service.image_name for service in services if service.can_be_built()}
+ services_to_pull = [service for service in services if service.image_name not in images_to_build]
+
+ msg = not silent and 'Pulling' or None
if parallel_pull:
def pull_service(service):
- service.pull(ignore_pull_failures, True)
+ strm = service.pull(ignore_pull_failures, True, stream=True)
+ if strm is None: # Attempting to pull service with no `image` key is a no-op
+ return
+
+ writer = parallel.get_stream_writer()
+
+ for event in strm:
+ if 'status' not in event:
+ continue
+ status = event['status'].lower()
+ if 'progressDetail' in event:
+ detail = event['progressDetail']
+ if 'current' in detail and 'total' in detail:
+ percentage = float(detail['current']) / float(detail['total'])
+ status = '{} ({:.1%})'.format(status, percentage)
+
+ writer.write(
+ msg, service.name, truncate_string(status), lambda s: s
+ )
_, errors = parallel.parallel_execute(
- services,
+ services_to_pull,
pull_service,
operator.attrgetter('name'),
- not silent and 'Pulling' or None,
+ msg,
limit=5,
)
if len(errors):
@@ -562,20 +660,36 @@ class Project(object):
raise ProjectError(combined_errors)
else:
- for service in services:
+ for service in services_to_pull:
service.pull(ignore_pull_failures, silent=silent)
def push(self, service_names=None, ignore_push_failures=False):
+ unique_images = set()
for service in self.get_services(service_names, include_deps=False):
- service.push(ignore_push_failures)
+ # Considering <image> and <image:latest> as the same
+ repo, tag, sep = parse_repository_tag(service.image_name)
+ service_image_name = sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))
+
+ if service_image_name not in unique_images:
+ service.push(ignore_push_failures)
+ unique_images.add(service_image_name)
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
- return list(filter(None, [
+ ctnrs = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})])
)
+ if ctnrs:
+ return ctnrs
+
+ return list(filter(lambda c: c.has_legacy_proj_name(self.name), filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters={'label': self.labels(one_off=one_off, legacy=True)})])
+ ))
def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
if service_names:
@@ -592,7 +706,7 @@ class Project(object):
def find_orphan_containers(self, remove_orphans):
def _find():
- containers = self._labeled_containers()
+ containers = set(self._labeled_containers() + self._labeled_containers(stopped=True))
for ctnr in containers:
service_name = ctnr.labels.get(LABEL_SERVICE)
if service_name not in self.service_names:
@@ -603,7 +717,10 @@ class Project(object):
if remove_orphans:
for ctnr in orphans:
log.info('Removing orphan container "{0}"'.format(ctnr.name))
- ctnr.kill()
+ try:
+ ctnr.kill()
+ except APIError:
+ pass
ctnr.remove(force=True)
else:
log.warning(
@@ -631,10 +748,11 @@ class Project(object):
def build_container_operation_with_timeout_func(self, operation, options):
def container_operation_with_timeout(container):
- if options.get('timeout') is None:
+ _options = options.copy()
+ if _options.get('timeout') is None:
service = self.get_service(container.service)
- options['timeout'] = service.stop_timeout(None)
- return getattr(container, operation)(**options)
+ _options['timeout'] = service.stop_timeout(None)
+ return getattr(container, operation)(**_options)
return container_operation_with_timeout
@@ -677,13 +795,13 @@ def get_secrets(service, service_secrets, secret_defs):
.format(service=service, secret=secret.source))
if secret_def.get('external'):
- log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
- "External secrets are not available to containers created by "
- "docker-compose.".format(service=service, secret=secret.source))
+ log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
+ "External secrets are not available to containers created by "
+ "docker-compose.".format(service=service, secret=secret.source))
continue
if secret.uid or secret.gid or secret.mode:
- log.warn(
+ log.warning(
"Service \"{service}\" uses secret \"{secret}\" with uid, "
"gid, or mode. These fields are not supported by this "
"implementation of the Compose file".format(
@@ -691,7 +809,15 @@ def get_secrets(service, service_secrets, secret_defs):
)
)
- secrets.append({'secret': secret, 'file': secret_def.get('file')})
+ secret_file = secret_def.get('file')
+ if not path.isfile(str(secret_file)):
+ log.warning(
+ "Service \"{service}\" uses an undefined secret file \"{secret_file}\", "
+ "the following file should be created \"{secret_file}\"".format(
+ service=service, secret_file=secret_file
+ )
+ )
+ secrets.append({'secret': secret, 'file': secret_file})
return secrets
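
The push de-duplication treats a bare image reference and its ':latest' form as the same target. A sketch of the canonicalisation, using the parse_repository_tag helper imported above:

    from compose.service import parse_repository_tag

    def canonical(image_name):
        repo, tag, sep = parse_repository_tag(image_name)
        return sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))

    assert canonical('registry.example.com/app') == \
        canonical('registry.example.com/app:latest')
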
diff --git a/compose/service.py b/compose/service.py
index bb9e26ba..d329be97 100644
--- a/compose/service.py
+++ b/compose/service.py
@@ -1,10 +1,13 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import itertools
+import json
import logging
import os
import re
import sys
+import tempfile
from collections import namedtuple
from collections import OrderedDict
from operator import attrgetter
@@ -26,6 +29,7 @@ from . import __version__
from . import const
from . import progress_stream
from .config import DOCKER_CONFIG_KEYS
+from .config import is_url
from .config import merge_environment
from .config import merge_labels
from .config.errors import DependencyError
@@ -39,8 +43,10 @@ from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
from .const import LABEL_VERSION
from .const import NANOCPUS_SCALE
+from .const import WINDOWS_LONGPATH_PREFIX
from .container import Container
from .errors import HealthCheckFailed
from .errors import NoHealthCheckConfigured
@@ -48,14 +54,20 @@ from .errors import OperationFailedError
from .parallel import parallel_execute
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
+from .utils import generate_random_id
from .utils import json_hash
from .utils import parse_bytes
from .utils import parse_seconds_float
+from .utils import truncate_id
+from .utils import unique_everseen
+if six.PY2:
+ import subprocess32 as subprocess
+else:
+ import subprocess
log = logging.getLogger(__name__)
-
HOST_CONFIG_KEYS = [
'cap_add',
'cap_drop',
@@ -79,6 +91,7 @@ HOST_CONFIG_KEYS = [
'group_add',
'init',
'ipc',
+ 'isolation',
'read_only',
'log_driver',
'log_opt',
@@ -123,7 +136,6 @@ class NoSuchImageError(Exception):
ServiceName = namedtuple('ServiceName', 'project service number')
-
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
@@ -159,19 +171,21 @@ class BuildAction(enum.Enum):
class Service(object):
def __init__(
- self,
- name,
- client=None,
- project='default',
- use_networking=False,
- links=None,
- volumes_from=None,
- network_mode=None,
- networks=None,
- secrets=None,
- scale=None,
- pid_mode=None,
- **options
+ self,
+ name,
+ client=None,
+ project='default',
+ use_networking=False,
+ links=None,
+ volumes_from=None,
+ network_mode=None,
+ networks=None,
+ secrets=None,
+ scale=1,
+ pid_mode=None,
+ default_platform=None,
+ extra_labels=[],
+ **options
):
self.name = name
self.client = client
@@ -183,28 +197,45 @@ class Service(object):
self.pid_mode = pid_mode or PidMode(None)
self.networks = networks or {}
self.secrets = secrets or []
- self.scale_num = scale or 1
+ self.scale_num = scale
+ self.default_platform = default_platform
self.options = options
+ self.extra_labels = extra_labels
def __repr__(self):
return '<Service: {}>'.format(self.name)
- def containers(self, stopped=False, one_off=False, filters={}):
- filters.update({'label': self.labels(one_off=one_off)})
+ def containers(self, stopped=False, one_off=False, filters=None, labels=None):
+ if filters is None:
+ filters = {}
+ filters.update({'label': self.labels(one_off=one_off) + (labels or [])})
- return list(filter(None, [
+ result = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
- filters=filters)]))
+ filters=filters)])
+ )
+ if result:
+ return result
+
+ filters.update({'label': self.labels(one_off=one_off, legacy=True) + (labels or [])})
+ return list(
+ filter(
+ lambda c: c.has_legacy_proj_name(self.project), filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters=filters)])
+ )
+ )
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
- labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
- for container in self.client.containers(filters={'label': labels}):
- return Container.from_ps(self.client, container)
+ for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
+ return container
raise ValueError("No container found for %s_%s" % (self.name, number))
@@ -216,15 +247,15 @@ class Service(object):
def show_scale_warnings(self, desired_num):
if self.custom_container_name and desired_num > 1:
- log.warn('The "%s" service is using the custom container name "%s". '
- 'Docker requires each container to have a unique name. '
- 'Remove the custom name to scale the service.'
- % (self.name, self.custom_container_name))
+ log.warning('The "%s" service is using the custom container name "%s". '
+ 'Docker requires each container to have a unique name. '
+ 'Remove the custom name to scale the service.'
+ % (self.name, self.custom_container_name))
if self.specifies_host_port() and desired_num > 1:
- log.warn('The "%s" service specifies a port on the host. If multiple containers '
- 'for this service are created on a single host, the port will clash.'
- % self.name)
+ log.warning('The "%s" service specifies a port on the host. If multiple containers '
+ 'for this service are created on a single host, the port will clash.'
+ % self.name)
def scale(self, desired_num, timeout=None):
"""
@@ -241,6 +272,11 @@ class Service(object):
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
+ for c in running_containers:
+ if not c.has_legacy_proj_name(self.project):
+ continue
+ log.info('Recreating container with legacy name %s' % c.name)
+ self.recreate_container(c, timeout, start_new_container=False)
if desired_num == num_running:
# do nothing as we already have the desired number
@@ -261,7 +297,7 @@ class Service(object):
c for c in stopped_containers if self._containers_have_diverged([c])
]
for c in divergent_containers:
- c.remove()
+ c.remove()
all_containers = list(set(all_containers) - set(divergent_containers))
@@ -309,9 +345,9 @@ class Service(object):
raise OperationFailedError("Cannot create container for service %s: %s" %
(self.name, ex.explanation))
- def ensure_image_exists(self, do_build=BuildAction.none, silent=False):
+ def ensure_image_exists(self, do_build=BuildAction.none, silent=False, cli=False):
if self.can_be_built() and do_build == BuildAction.force:
- self.build()
+ self.build(cli=cli)
return
try:
@@ -327,12 +363,18 @@ class Service(object):
if do_build == BuildAction.skip:
raise NeedsBuildError(self)
- self.build()
- log.warn(
+ self.build(cli=cli)
+ log.warning(
"Image for service {} was built because it did not already exist. To "
"rebuild this image you must use `docker-compose build` or "
"`docker-compose up --build`.".format(self.name))
+ def get_image_registry_data(self):
+ try:
+ return self.client.inspect_distribution(self.image_name)
+ except APIError:
+ raise NoSuchImageError("Image '{}' not found".format(self.image_name))
+
def image(self):
try:
return self.client.inspect_image(self.image_name)
@@ -341,7 +383,16 @@ class Service(object):
@property
def image_name(self):
- return self.options.get('image', '{s.project}_{s.name}'.format(s=self))
+ return self.options.get('image', '{project}_{s.name}'.format(
+ s=self, project=self.project.lstrip('_-')
+ ))
+
+ @property
+ def platform(self):
+ platform = self.options.get('platform')
+ if not platform and version_gte(self.client.api_version, '1.35'):
+ platform = self.default_platform
+ return platform
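The property resolves the platform in two steps: an explicit `platform` option wins, and
`default_platform` is consulted only when the daemon API is new enough. A toy version of the
same decision (the function name is a stand-in; the real code uses the `version_gte` helper):

    def resolve_platform(option, default, api_version):
        # Explicit service-level option first; default only for API >= 1.35.
        if option:
            return option
        if tuple(int(p) for p in api_version.split('.')) >= (1, 35):
            return default
        return None

    assert resolve_platform('linux/amd64', 'linux/arm64', '1.30') == 'linux/amd64'
    assert resolve_platform(None, 'linux/arm64', '1.38') == 'linux/arm64'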
def convergence_plan(self, strategy=ConvergenceStrategy.changed):
containers = self.containers(stopped=True)
@@ -353,8 +404,8 @@ class Service(object):
return ConvergencePlan('start', containers)
if (
- strategy is ConvergenceStrategy.always or
- self._containers_have_diverged(containers)
+ strategy is ConvergenceStrategy.always or
+ self._containers_have_diverged(containers)
):
return ConvergencePlan('recreate', containers)
@@ -380,6 +431,10 @@ class Service(object):
has_diverged = False
for c in containers:
+ if c.has_legacy_proj_name(self.project):
+ log.debug('%s has diverged: Legacy project name' % c.name)
+ has_diverged = True
+ continue
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
@@ -390,74 +445,79 @@ class Service(object):
return has_diverged
- def _execute_convergence_create(self, scale, detached, start, project_services=None):
- i = self._next_container_number()
-
- def create_and_start(service, n):
- container = service.create_container(number=n, quiet=True)
- if not detached:
- container.attach_log_stream()
- if start:
- self.start_container(container)
- return container
-
- containers, errors = parallel_execute(
- [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
- lambda service_name: create_and_start(self, service_name.number),
- lambda service_name: self.get_container_name(service_name.service, service_name.number),
- "Creating"
- )
- for error in errors.values():
- raise OperationFailedError(error)
+ def _execute_convergence_create(self, scale, detached, start):
- return containers
+ i = self._next_container_number()
+
+ def create_and_start(service, n):
+ container = service.create_container(number=n, quiet=True)
+ if not detached:
+ container.attach_log_stream()
+ if start:
+ self.start_container(container)
+ return container
+
+ containers, errors = parallel_execute(
+ [
+ ServiceName(self.project, self.name, index)
+ for index in range(i, i + scale)
+ ],
+ lambda service_name: create_and_start(self, service_name.number),
+ lambda service_name: self.get_container_name(service_name.service, service_name.number),
+ "Creating"
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ return containers
def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
renew_anonymous_volumes):
- if scale is not None and len(containers) > scale:
- self._downscale(containers[scale:], timeout)
- containers = containers[:scale]
-
- def recreate(container):
- return self.recreate_container(
- container, timeout=timeout, attach_logs=not detached,
- start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
- )
- containers, errors = parallel_execute(
- containers,
- recreate,
- lambda c: c.name,
- "Recreating",
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
+
+ def recreate(container):
+ return self.recreate_container(
+ container, timeout=timeout, attach_logs=not detached,
+ start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
)
- for error in errors.values():
- raise OperationFailedError(error)
- if scale is not None and len(containers) < scale:
- containers.extend(self._execute_convergence_create(
- scale - len(containers), detached, start
- ))
- return containers
+ containers, errors = parallel_execute(
+ containers,
+ recreate,
+ lambda c: c.name,
+ "Recreating",
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
def _execute_convergence_start(self, containers, scale, timeout, detached, start):
- if scale is not None and len(containers) > scale:
- self._downscale(containers[scale:], timeout)
- containers = containers[:scale]
- if start:
- _, errors = parallel_execute(
- containers,
- lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
- lambda c: c.name,
- "Starting",
- )
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
+ if start:
+ _, errors = parallel_execute(
+ containers,
+ lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
+ lambda c: c.name,
+ "Starting",
+ )
- for error in errors.values():
- raise OperationFailedError(error)
+ for error in errors.values():
+ raise OperationFailedError(error)
- if scale is not None and len(containers) < scale:
- containers.extend(self._execute_convergence_create(
- scale - len(containers), detached, start
- ))
- return containers
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
def _downscale(self, containers, timeout=None):
def stop_and_remove(container):
@@ -473,8 +533,8 @@ class Service(object):
def execute_convergence_plan(self, plan, timeout=None, detached=False,
start=True, scale_override=None,
- rescale=True, project_services=None,
- reset_container_image=False, renew_anonymous_volumes=False):
+ rescale=True, reset_container_image=False,
+ renew_anonymous_volumes=False):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))
@@ -483,7 +543,7 @@ class Service(object):
if action == 'create':
return self._execute_convergence_create(
- scale, detached, start, project_services
+ scale, detached, start
)
# The create action always needs an initial scale, but otherwise,
@@ -533,7 +593,7 @@ class Service(object):
container.rename_to_tmp_name()
new_container = self.create_container(
previous_container=container if not renew_anonymous_volumes else None,
- number=container.labels.get(LABEL_CONTAINER_NUMBER),
+ number=container.number,
quiet=True,
)
if attach_logs:
@@ -564,6 +624,8 @@ class Service(object):
try:
container.start()
except APIError as ex:
+ if "driver failed programming external connectivity" in ex.explanation:
+ log.warn("Host is already in use by another container")
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
return container
@@ -621,12 +683,19 @@ class Service(object):
return json_hash(self.config_dict())
def config_dict(self):
+ def image_id():
+ try:
+ return self.image()['Id']
+ except NoSuchImageError:
+ return None
+
return {
'options': self.options,
- 'image_id': self.image()['Id'],
+ 'image_id': image_id(),
'links': self.get_link_names(),
'net': self.network_mode.id,
'networks': self.networks,
+ 'secrets': self.secrets,
'volumes_from': [
(v.source.name, v.mode)
for v in self.volumes_from if isinstance(v.source, Service)
@@ -637,11 +706,11 @@ class Service(object):
net_name = self.network_mode.service_name
pid_namespace = self.pid_mode.service_name
return (
- self.get_linked_service_names() +
- self.get_volumes_from_names() +
- ([net_name] if net_name else []) +
- ([pid_namespace] if pid_namespace else []) +
- list(self.options.get('depends_on', {}).keys())
+ self.get_linked_service_names() +
+ self.get_volumes_from_names() +
+ ([net_name] if net_name else []) +
+ ([pid_namespace] if pid_namespace else []) +
+ list(self.options.get('depends_on', {}).keys())
)
def get_dependency_configs(self):
@@ -682,14 +751,19 @@ class Service(object):
def get_volumes_from_names(self):
return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]
- # TODO: this would benefit from github.com/docker/docker/pull/14699
- # to remove the need to inspect every container
def _next_container_number(self, one_off=False):
- containers = self._fetch_containers(
- all=True,
- filters={'label': self.labels(one_off=one_off)}
+ if one_off:
+ return None
+ containers = itertools.chain(
+ self._fetch_containers(
+ all=True,
+ filters={'label': self.labels(one_off=False)}
+ ), self._fetch_containers(
+ all=True,
+ filters={'label': self.labels(one_off=False, legacy=True)}
+ )
)
- numbers = [c.number for c in containers]
+ numbers = [c.number for c in containers if c.number is not None]
return 1 if not numbers else max(numbers) + 1
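Numbering therefore survives mixed old- and new-style containers: both label sets are
scanned, one-off containers (which carry a slug rather than a number) are skipped, and the
next number is one past the current maximum. For example:

    def next_container_number(existing):
        # Mirrors the logic above; one-off containers have number None.
        numbers = [n for n in existing if n is not None]
        return 1 if not numbers else max(numbers) + 1

    assert next_container_number([]) == 1
    assert next_container_number([1, 3, None]) == 4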
def _fetch_containers(self, **fetch_options):
@@ -767,6 +841,7 @@ class Service(object):
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
+ slug = generate_random_id() if one_off else None
container_options = dict(
(k, self.options[k])
@@ -775,7 +850,7 @@ class Service(object):
container_options.update(override_options)
if not container_options.get('name'):
- container_options['name'] = self.get_container_name(self.name, number, one_off)
+ container_options['name'] = self.get_container_name(self.name, number, slug)
container_options.setdefault('detach', True)
@@ -825,9 +900,11 @@ class Service(object):
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
- self.labels(one_off=one_off),
+ self.labels(one_off=one_off) + self.extra_labels,
number,
- self.config_hash if add_config_hash else None)
+ self.config_hash if add_config_hash else None,
+ slug
+ )
# Delete options which are only used in HostConfig
for key in HOST_CONFIG_KEYS:
@@ -858,7 +935,6 @@ class Service(object):
container_volumes, self.options.get('tmpfs') or [], previous_container,
container_mounts
)
- override_options['binds'] = binds
container_options['environment'].update(affinity)
container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
@@ -871,13 +947,13 @@ class Service(object):
if m.is_tmpfs:
override_options['tmpfs'].append(m.target)
else:
- override_options['binds'].append(m.legacy_repr())
+ binds.append(m.legacy_repr())
container_options['volumes'][m.target] = {}
secret_volumes = self.get_secret_volumes()
if secret_volumes:
if version_lt(self.client.api_version, '1.30'):
- override_options['binds'].extend(v.legacy_repr() for v in secret_volumes)
+ binds.extend(v.legacy_repr() for v in secret_volumes)
container_options['volumes'].update(
(v.target, {}) for v in secret_volumes
)
@@ -885,6 +961,9 @@ class Service(object):
override_options['mounts'] = override_options.get('mounts') or []
override_options['mounts'].extend([build_mount(v) for v in secret_volumes])
+ # Remove possible duplicates (see e.g. https://github.com/docker/compose/issues/5885).
+ # unique_everseen preserves order. (see https://github.com/docker/compose/issues/6091).
+ override_options['binds'] = list(unique_everseen(binds))
return container_options, override_options
def _get_container_host_config(self, override_options, one_off=False):
@@ -980,8 +1059,11 @@ class Service(object):
return [build_spec(secret) for secret in self.secrets]
def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
- gzip=False):
- log.info('Building %s' % self.name)
+ gzip=False, rm=True, silent=False, cli=False, progress=None):
+ output_stream = open(os.devnull, 'w')
+ if not silent:
+ output_stream = sys.stdout
+ log.info('Building %s' % self.name)
build_opts = self.options.get('build', {})
@@ -992,27 +1074,22 @@ class Service(object):
for k, v in self._parse_proxy_config().items():
build_args.setdefault(k, v)
- # python2 os.stat() doesn't support unicode on some UNIX, so we
- # encode it to a bytestring to be safe
- path = build_opts.get('context')
- if not six.PY3 and not IS_WINDOWS_PLATFORM:
- path = path.encode('utf8')
-
- platform = self.options.get('platform')
- if platform and version_lt(self.client.api_version, '1.35'):
+ path = rewrite_build_path(build_opts.get('context'))
+ if self.platform and version_lt(self.client.api_version, '1.35'):
raise OperationFailedError(
'Impossible to perform platform-targeted builds for API version < 1.35'
)
- build_output = self.client.build(
+ builder = self.client if not cli else _CLIBuilder(progress)
+ build_output = builder.build(
path=path,
tag=self.image_name,
- rm=True,
+ rm=rm,
forcerm=force_rm,
pull=pull,
nocache=no_cache,
dockerfile=build_opts.get('dockerfile', None),
- cache_from=build_opts.get('cache_from', None),
+ cache_from=self.get_cache_from(build_opts),
labels=build_opts.get('labels', None),
buildargs=build_args,
network_mode=build_opts.get('network', None),
@@ -1024,11 +1101,11 @@ class Service(object):
},
gzip=gzip,
isolation=build_opts.get('isolation', self.options.get('isolation', None)),
- platform=platform,
+ platform=self.platform,
)
try:
- all_events = stream_output(build_output, sys.stdout)
+ all_events = list(stream_output(build_output, output_stream))
except StreamOutputError as e:
raise BuildError(self, six.text_type(e))
@@ -1050,26 +1127,33 @@ class Service(object):
return image_id
+ def get_cache_from(self, build_opts):
+ cache_from = build_opts.get('cache_from', None)
+ if cache_from is not None:
+ cache_from = [tag for tag in cache_from if tag]
+ return cache_from
+
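`get_cache_from` only filters out falsy entries, so an empty tag (e.g. from an unset
variable in the YAML) never reaches the builder:

    build_opts = {'cache_from': ['myimage:latest', '', None]}
    cache_from = [tag for tag in build_opts['cache_from'] if tag]
    assert cache_from == ['myimage:latest']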
def can_be_built(self):
return 'build' in self.options
- def labels(self, one_off=False):
+ def labels(self, one_off=False, legacy=False):
+ proj_name = self.project if not legacy else re.sub(r'[_-]', '', self.project)
return [
- '{0}={1}'.format(LABEL_PROJECT, self.project),
+ '{0}={1}'.format(LABEL_PROJECT, proj_name),
'{0}={1}'.format(LABEL_SERVICE, self.name),
- '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
+ '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@property
def custom_container_name(self):
return self.options.get('container_name')
- def get_container_name(self, service_name, number, one_off=False):
- if self.custom_container_name and not one_off:
+ def get_container_name(self, service_name, number, slug=None):
+ if self.custom_container_name and slug is None:
return self.custom_container_name
container_name = build_container_name(
- self.project, service_name, number, one_off,
+ self.project, service_name, number, slug,
)
ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
if container_name in ext_links_origins:
@@ -1090,6 +1174,9 @@ class Service(object):
try:
self.client.remove_image(self.image_name)
return True
+ except ImageNotFound:
+ log.warning("Image %s not found.", self.image_name)
+ return False
except APIError as e:
log.error("Failed to remove image for service %s: %s", self.name, e)
return False
@@ -1121,7 +1208,23 @@ class Service(object):
return any(has_host_port(binding) for binding in self.options.get('ports', []))
- def pull(self, ignore_pull_failures=False, silent=False):
+ def _do_pull(self, repo, pull_kwargs, silent, ignore_pull_failures):
+ try:
+ output = self.client.pull(repo, **pull_kwargs)
+ if silent:
+ with open(os.devnull, 'w') as devnull:
+ for event in stream_output(output, devnull):
+ yield event
+ else:
+ for event in stream_output(output, sys.stdout):
+ yield event
+ except (StreamOutputError, NotFound) as e:
+ if not ignore_pull_failures:
+ raise
+ else:
+ log.error(six.text_type(e))
+
+ def pull(self, ignore_pull_failures=False, silent=False, stream=False):
if 'image' not in self.options:
return
@@ -1129,29 +1232,20 @@ class Service(object):
kwargs = {
'tag': tag or 'latest',
'stream': True,
- 'platform': self.options.get('platform'),
+ 'platform': self.platform,
}
if not silent:
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
if kwargs['platform'] and version_lt(self.client.api_version, '1.35'):
raise OperationFailedError(
- 'Impossible to perform platform-targeted builds for API version < 1.35'
+ 'Impossible to perform platform-targeted pulls for API version < 1.35'
)
- try:
- output = self.client.pull(repo, **kwargs)
- if silent:
- with open(os.devnull, 'w') as devnull:
- return progress_stream.get_digest_from_pull(
- stream_output(output, devnull))
- else:
- return progress_stream.get_digest_from_pull(
- stream_output(output, sys.stdout))
- except (StreamOutputError, NotFound) as e:
- if not ignore_pull_failures:
- raise
- else:
- log.error(six.text_type(e))
+
+ event_stream = self._do_pull(repo, kwargs, silent, ignore_pull_failures)
+ if stream:
+ return event_stream
+ return progress_stream.get_digest_from_pull(event_stream)
def push(self, ignore_push_failures=False):
if 'image' not in self.options or 'build' not in self.options:
@@ -1248,7 +1342,7 @@ class ServicePidMode(PidMode):
if containers:
return 'container:' + containers[0].id
- log.warn(
+ log.warning(
"Service %s is trying to use reuse the PID namespace "
"of another service that is not running." % (self.service_name)
)
@@ -1311,19 +1405,21 @@ class ServiceNetworkMode(object):
if containers:
return 'container:' + containers[0].id
- log.warn("Service %s is trying to use reuse the network stack "
- "of another service that is not running." % (self.id))
+ log.warning("Service %s is trying to use reuse the network stack "
+ "of another service that is not running." % (self.id))
return None
# Names
-def build_container_name(project, service, number, one_off=False):
- bits = [project, service]
- if one_off:
- bits.append('run')
- return '_'.join(bits + [str(number)])
+def build_container_name(project, service, number, slug=None):
+ bits = [project.lstrip('-_'), service]
+ if slug:
+ bits.extend(['run', truncate_id(slug)])
+ else:
+ bits.append(str(number))
+ return '_'.join(bits)
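A quick sketch of the two name shapes this produces (the slug value is made up):

    def sketch_name(project, service, number, slug=None):
        # Same shape as build_container_name above; truncate_id keeps 12 chars.
        bits = [project.lstrip('-_'), service]
        bits.extend(['run', slug[:12]] if slug else [str(number)])
        return '_'.join(bits)

    assert sketch_name('myproj', 'web', 2) == 'myproj_web_2'
    assert sketch_name('_myproj', 'web', 1, 'a1b2c3d4e5f6a7b8') == 'myproj_web_run_a1b2c3d4e5f6'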
# Images
@@ -1366,7 +1462,7 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
"""
affinity = {}
- volume_bindings = dict(
+ volume_bindings = OrderedDict(
build_volume_binding(volume)
for volume in volumes
if volume.external
@@ -1426,6 +1522,11 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
if not mount.get('Name'):
continue
+ # Volume (probably an image volume) is overridden by a mount in the service's config
+ # and would cause a duplicate mountpoint error
+ if volume.internal in [m.target for m in mounts_option]:
+ continue
+
# Copy existing volume from old container
volume = volume._replace(external=mount['Name'])
volumes.append(volume)
@@ -1452,11 +1553,11 @@ def warn_on_masked_volume(volumes_option, container_volumes, service):
for volume in volumes_option:
if (
- volume.external and
- volume.internal in container_volumes and
- container_volumes.get(volume.internal) != volume.external
+ volume.external and
+ volume.internal in container_volumes and
+ container_volumes.get(volume.internal) != volume.external
):
- log.warn((
+ log.warning((
"Service \"{service}\" is using volume \"{volume}\" from the "
"previous container. Host mapping \"{host_path}\" has no effect. "
"Remove the existing containers (with `docker-compose rm {service}`) "
@@ -1501,13 +1602,17 @@ def build_mount(mount_spec):
read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
)
+
# Labels
-def build_container_labels(label_options, service_labels, number, config_hash):
+def build_container_labels(label_options, service_labels, number, config_hash, slug):
labels = dict(label_options or {})
labels.update(label.split('=', 1) for label in service_labels)
- labels[LABEL_CONTAINER_NUMBER] = str(number)
+ if number is not None:
+ labels[LABEL_CONTAINER_NUMBER] = str(number)
+ if slug is not None:
+ labels[LABEL_SLUG] = slug
labels[LABEL_VERSION] = __version__
if config_hash:
@@ -1552,6 +1657,7 @@ def format_environment(environment):
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
return '{key}={value}'.format(key=key, value=value)
+
return [format_env(*item) for item in environment.items()]
@@ -1596,3 +1702,151 @@ def convert_blkio_config(blkio_config):
arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
result[field] = arr
return result
+
+
+def rewrite_build_path(path):
+ # python2 os.stat() doesn't support unicode on some UNIX, so we
+ # encode it to a bytestring to be safe
+ if not six.PY3 and not IS_WINDOWS_PLATFORM:
+ path = path.encode('utf8')
+
+ if IS_WINDOWS_PLATFORM and not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX):
+ path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)
+
+ return path
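On Windows the context path gains the `\\?\` long-path prefix unless it is a URL or already
prefixed. A sketch, assuming `WINDOWS_LONGPATH_PREFIX` is the raw `\\?\` string and leaving
out the `os.path.normpath` step for brevity:

    WINDOWS_LONGPATH_PREFIX = '\\\\?\\'  # assumed value of the constant
    path = 'C:\\work\\app'
    assert WINDOWS_LONGPATH_PREFIX + path == '\\\\?\\C:\\work\\app'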
+
+
+class _CLIBuilder(object):
+ def __init__(self, progress):
+ self._progress = progress
+
+ def build(self, path, tag=None, quiet=False, fileobj=None,
+ nocache=False, rm=False, timeout=None,
+ custom_context=False, encoding=None, pull=False,
+ forcerm=False, dockerfile=None, container_limits=None,
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None, cache_from=None, target=None, network_mode=None,
+ squash=None, extra_hosts=None, platform=None, isolation=None,
+ use_config_proxy=True):
+ """
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ cache_from (:py:class:`list`): A list of images used for build
+ cache resolution
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ custom_context (bool): Optional if using ``fileobj``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``
+ dockerfile (str): path within the build context to the Dockerfile
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+ labels (dict): A dictionary of labels to set on the image
+ network_mode (str): networking mode for the run commands during
+ build
+ nocache (bool): Don't use the cache when set to ``True``
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ quiet (bool): Whether to return the status
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ tag (str): A tag to add to the final image
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ timeout (int): HTTP timeout
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
+ Returns:
+ A generator for the build output.
+ """
+ if dockerfile:
+ dockerfile = os.path.join(path, dockerfile)
+ iidfile = tempfile.mktemp()
+
+ command_builder = _CommandBuilder()
+ command_builder.add_params("--build-arg", buildargs)
+ command_builder.add_list("--cache-from", cache_from)
+ command_builder.add_arg("--file", dockerfile)
+ command_builder.add_flag("--force-rm", forcerm)
+ command_builder.add_arg("--memory", container_limits.get("memory"))
+ command_builder.add_flag("--no-cache", nocache)
+ command_builder.add_arg("--progress", self._progress)
+ command_builder.add_flag("--pull", pull)
+ command_builder.add_arg("--tag", tag)
+ command_builder.add_arg("--target", target)
+ command_builder.add_arg("--iidfile", iidfile)
+ args = command_builder.build([path])
+
+ magic_word = "Successfully built "
+ appear = False
+ with subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True) as p:
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ # Fix non-ASCII chars on Python 2. To remove when #6890 is complete.
+ if six.PY2:
+ magic_word = str(magic_word)
+ if line.startswith(magic_word):
+ appear = True
+ yield json.dumps({"stream": line})
+
+ with open(iidfile) as f:
+ line = f.readline()
+ image_id = line.split(":")[1].strip()
+ os.remove(iidfile)
+
+ # When `DOCKER_BUILDKIT=1` is set, the CLI output contains no
+ # "Successfully built <id>" message. Since that message is how
+ # `Service::build` extracts the `image_id`, it has to be added
+ # manually.
+ if not appear:
+ yield json.dumps({"stream": "{}{}\n".format(magic_word, image_id)})
+
+
+class _CommandBuilder(object):
+ def __init__(self):
+ self._args = ["docker", "build"]
+
+ def add_arg(self, name, value):
+ if value:
+ self._args.extend([name, str(value)])
+
+ def add_flag(self, name, flag):
+ if flag:
+ self._args.extend([name])
+
+ def add_params(self, name, params):
+ if params:
+ for key, val in params.items():
+ self._args.extend([name, "{}={}".format(key, val)])
+
+ def add_list(self, name, values):
+ if values:
+ for val in values:
+ self._args.extend([name, val])
+
+ def build(self, args):
+ return self._args + args
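A hypothetical invocation showing the argv that `_CLIBuilder` ends up handing to
`subprocess.Popen` (values are made up):

    builder = _CommandBuilder()
    builder.add_arg('--tag', 'myproj_web')
    builder.add_flag('--pull', True)
    builder.add_params('--build-arg', {'HTTP_PROXY': 'http://proxy:3128'})
    assert builder.build(['.']) == [
        'docker', 'build', '--tag', 'myproj_web', '--pull',
        '--build-arg', 'HTTP_PROXY=http://proxy:3128', '.',
    ]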
diff --git a/compose/utils.py b/compose/utils.py
index 956673b4..a1e5e643 100644
--- a/compose/utils.py
+++ b/compose/utils.py
@@ -3,10 +3,10 @@ from __future__ import unicode_literals
import codecs
import hashlib
-import json
import json.decoder
import logging
import ntpath
+import random
import six
from docker.errors import DockerException
@@ -151,3 +151,37 @@ def unquote_path(s):
if s[0] == '"' and s[-1] == '"':
return s[1:-1]
return s
+
+
+def generate_random_id():
+ # Draw 256 random bits as a hex string; retry whenever the 12-character
+ # truncation happens to be purely numeric, so a one-off slug cannot be
+ # confused with a container number.
+ while True:
+ val = hex(random.getrandbits(32 * 8))[2:].rstrip('L')
+ try:
+ int(truncate_id(val))
+ continue
+ except ValueError:
+ return val
+
+
+def truncate_id(value):
+ if ':' in value:
+ value = value[value.index(':') + 1:]
+ if len(value) > 12:
+ return value[:12]
+ return value
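`truncate_id` handles both bare IDs and `algo:`-prefixed digests:

    assert truncate_id('sha256:4a7b8c9d0e1f2a3b') == '4a7b8c9d0e1f'
    assert truncate_id('abc') == 'abc'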
+
+
+def unique_everseen(iterable, key=lambda x: x):
+ "List unique elements, preserving order. Remember all elements ever seen."
+ seen = set()
+ for element in iterable:
+ unique_key = key(element)
+ if unique_key not in seen:
+ seen.add(unique_key)
+ yield element
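This is the helper that deduplicates the volume binds assembled in service.py while keeping
their order:

    binds = ['/data:/data:rw', '/logs:/logs:ro', '/data:/data:rw']
    assert list(unique_everseen(binds)) == ['/data:/data:rw', '/logs:/logs:ro']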
+
+
+def truncate_string(s, max_chars=35):
+ # Ellipsize strings longer than max_chars; the '...' counts toward the limit.
+ if len(s) > max_chars:
+ return s[:max_chars - 3] + '...'
+ return s
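So the ellipsis counts toward the budget:

    assert truncate_string('x' * 50) == 'x' * 32 + '...'
    assert truncate_string('short') == 'short'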
diff --git a/compose/volume.py b/compose/volume.py
index 6bf18404..b02fc5d8 100644
--- a/compose/volume.py
+++ b/compose/volume.py
@@ -2,15 +2,19 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import re
from docker.errors import NotFound
from docker.utils import version_lt
+from . import __version__
from .config import ConfigurationError
from .config.types import VolumeSpec
from .const import LABEL_PROJECT
+from .const import LABEL_VERSION
from .const import LABEL_VOLUME
+
log = logging.getLogger(__name__)
@@ -25,6 +29,7 @@ class Volume(object):
self.external = external
self.labels = labels
self.custom_name = custom_name
+ self.legacy = None
def create(self):
return self.client.create_volume(
@@ -33,17 +38,20 @@ class Volume(object):
def remove(self):
if self.external:
- log.info("Volume %s is external, skipping", self.full_name)
+ log.info("Volume %s is external, skipping", self.true_name)
return
- log.info("Removing volume %s", self.full_name)
- return self.client.remove_volume(self.full_name)
+ log.info("Removing volume %s", self.true_name)
+ return self.client.remove_volume(self.true_name)
- def inspect(self):
+ def inspect(self, legacy=None):
+ if legacy:
+ return self.client.inspect_volume(self.legacy_full_name)
return self.client.inspect_volume(self.full_name)
def exists(self):
+ self._set_legacy_flag()
try:
- self.inspect()
+ self.inspect(legacy=self.legacy)
except NotFound:
return False
return True
@@ -52,7 +60,22 @@ class Volume(object):
def full_name(self):
if self.custom_name:
return self.name
- return '{0}_{1}'.format(self.project, self.name)
+ return '{0}_{1}'.format(self.project.lstrip('-_'), self.name)
+
+ @property
+ def legacy_full_name(self):
+ if self.custom_name:
+ return self.name
+ return '{0}_{1}'.format(
+ re.sub(r'[_-]', '', self.project), self.name
+ )
+
+ @property
+ def true_name(self):
+ self._set_legacy_flag()
+ if self.legacy:
+ return self.legacy_full_name
+ return self.full_name
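For a hypothetical project `my-proj` and volume `data`, the two candidate names differ as
follows; `true_name` picks the legacy form only when such a volume already exists:

    import re

    project, name = 'my-proj', 'data'
    full_name = '{0}_{1}'.format(project.lstrip('-_'), name)
    legacy_full_name = '{0}_{1}'.format(re.sub(r'[_-]', '', project), name)
    assert (full_name, legacy_full_name) == ('my-proj_data', 'myproj_data')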
@property
def _labels(self):
@@ -62,9 +85,19 @@ class Volume(object):
labels.update({
LABEL_PROJECT: self.project,
LABEL_VOLUME: self.name,
+ LABEL_VERSION: __version__,
})
return labels
+ def _set_legacy_flag(self):
+ if self.legacy is not None:
+ return
+ try:
+ data = self.inspect(legacy=True)
+ self.legacy = data is not None
+ except NotFound:
+ self.legacy = False
+
class ProjectVolumes(object):
@@ -94,7 +127,7 @@ class ProjectVolumes(object):
try:
volume.remove()
except NotFound:
- log.warn("Volume %s not found.", volume.full_name)
+ log.warning("Volume %s not found.", volume.true_name)
def initialize(self):
try:
@@ -124,7 +157,7 @@ class ProjectVolumes(object):
)
volume.create()
else:
- check_remote_volume_config(volume.inspect(), volume)
+ check_remote_volume_config(volume.inspect(legacy=volume.legacy), volume)
except NotFound:
raise ConfigurationError(
'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
@@ -136,9 +169,9 @@ class ProjectVolumes(object):
if isinstance(volume_spec, VolumeSpec):
volume = self.volumes[volume_spec.external]
- return volume_spec._replace(external=volume.full_name)
+ return volume_spec._replace(external=volume.true_name)
else:
- volume_spec.source = self.volumes[volume_spec.source].full_name
+ volume_spec.source = self.volumes[volume_spec.source].true_name
return volume_spec
@@ -152,7 +185,7 @@ class VolumeConfigChangedError(ConfigurationError):
'first:\n$ docker volume rm {full_name}'.format(
vol_name=local.name, property_name=property_name,
local_value=local_value, remote_value=remote_value,
- full_name=local.full_name
+ full_name=local.true_name
)
)
@@ -176,7 +209,7 @@ def check_remote_volume_config(remote, local):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
- log.warn(
+ log.warning(
'Volume {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.name, k)
)