summaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorFelipe Sateler <fsateler@debian.org>2017-11-19 18:27:48 -0300
committerFelipe Sateler <fsateler@debian.org>2017-11-19 18:27:48 -0300
commita997ae5b1840f2878b16443bd8e3c784d23ba9ac (patch)
tree35a232d0ad24ed27ccc2ad4dfa45f7c7496f55b3 /tests
Import docker-compose_1.17.1.orig.tar.gz
[dgit import orig docker-compose_1.17.1.orig.tar.gz]
Diffstat (limited to 'tests')
-rw-r--r--tests/__init__.py14
-rw-r--r--tests/acceptance/__init__.py0
-rw-r--r--tests/acceptance/cli_test.py2390
-rw-r--r--tests/fixtures/UpperCaseDir/docker-compose.yml6
-rw-r--r--tests/fixtures/abort-on-container-exit-0/docker-compose.yml6
-rw-r--r--tests/fixtures/abort-on-container-exit-1/docker-compose.yml6
-rw-r--r--tests/fixtures/build-ctx/Dockerfile3
-rw-r--r--tests/fixtures/build-path-override-dir/docker-compose.yml2
-rw-r--r--tests/fixtures/build-path/docker-compose.yml2
-rw-r--r--tests/fixtures/build-shm-size/Dockerfile4
-rw-r--r--tests/fixtures/build-shm-size/docker-compose.yml7
-rw-r--r--tests/fixtures/bundle-with-digests/docker-compose.yml9
-rw-r--r--tests/fixtures/commands-composefile/docker-compose.yml5
-rw-r--r--tests/fixtures/default-env-file/.env4
-rw-r--r--tests/fixtures/default-env-file/docker-compose.yml6
-rw-r--r--tests/fixtures/dockerfile-with-volume/Dockerfile4
-rw-r--r--tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml3
-rw-r--r--tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml3
-rw-r--r--tests/fixtures/duplicate-override-yaml-files/docker-compose.yml10
-rw-r--r--tests/fixtures/echo-services/docker-compose.yml6
-rw-r--r--tests/fixtures/entrypoint-composefile/docker-compose.yml6
-rw-r--r--tests/fixtures/entrypoint-dockerfile/Dockerfile4
-rw-r--r--tests/fixtures/entrypoint-dockerfile/docker-compose.yml4
-rw-r--r--tests/fixtures/env-file/docker-compose.yml4
-rw-r--r--tests/fixtures/env-file/test.env1
-rw-r--r--tests/fixtures/env/one.env11
-rw-r--r--tests/fixtures/env/resolve.env4
-rw-r--r--tests/fixtures/env/two.env2
-rw-r--r--tests/fixtures/environment-composefile/docker-compose.yml7
-rw-r--r--tests/fixtures/environment-interpolation/docker-compose.yml17
-rw-r--r--tests/fixtures/exit-code-from/docker-compose.yml6
-rw-r--r--tests/fixtures/expose-composefile/docker-compose.yml11
-rw-r--r--tests/fixtures/extends/circle-1.yml12
-rw-r--r--tests/fixtures/extends/circle-2.yml12
-rw-r--r--tests/fixtures/extends/common-env-labels-ulimits.yml13
-rw-r--r--tests/fixtures/extends/common.yml7
-rw-r--r--tests/fixtures/extends/docker-compose.yml17
-rw-r--r--tests/fixtures/extends/healthcheck-1.yml9
-rw-r--r--tests/fixtures/extends/healthcheck-2.yml6
-rw-r--r--tests/fixtures/extends/invalid-links.yml11
-rw-r--r--tests/fixtures/extends/invalid-net-v2.yml12
-rw-r--r--tests/fixtures/extends/invalid-net.yml8
-rw-r--r--tests/fixtures/extends/invalid-volumes.yml9
-rw-r--r--tests/fixtures/extends/nested-intermediate.yml6
-rw-r--r--tests/fixtures/extends/nested.yml6
-rw-r--r--tests/fixtures/extends/no-file-specified.yml9
-rw-r--r--tests/fixtures/extends/nonexistent-path-base.yml6
-rw-r--r--tests/fixtures/extends/nonexistent-path-child.yml8
-rw-r--r--tests/fixtures/extends/nonexistent-service.yml4
-rw-r--r--tests/fixtures/extends/service-with-invalid-schema.yml4
-rw-r--r--tests/fixtures/extends/service-with-valid-composite-extends.yml5
-rw-r--r--tests/fixtures/extends/specify-file-as-self.yml17
-rw-r--r--tests/fixtures/extends/valid-common-config.yml6
-rw-r--r--tests/fixtures/extends/valid-common.yml3
-rw-r--r--tests/fixtures/extends/valid-composite-extends.yml2
-rw-r--r--tests/fixtures/extends/valid-interpolation-2.yml3
-rw-r--r--tests/fixtures/extends/valid-interpolation.yml5
-rw-r--r--tests/fixtures/extends/verbose-and-shorthand.yml15
-rw-r--r--tests/fixtures/healthcheck/docker-compose.yml24
-rw-r--r--tests/fixtures/invalid-composefile/invalid.yml5
-rw-r--r--tests/fixtures/links-composefile/docker-compose.yml11
-rw-r--r--tests/fixtures/logging-composefile-legacy/docker-compose.yml10
-rw-r--r--tests/fixtures/logging-composefile/docker-compose.yml14
-rw-r--r--tests/fixtures/logs-composefile/docker-compose.yml6
-rw-r--r--tests/fixtures/logs-tail-composefile/docker-compose.yml3
-rw-r--r--tests/fixtures/longer-filename-composefile/docker-compose.yaml3
-rw-r--r--tests/fixtures/multiple-composefiles/compose2.yml3
-rw-r--r--tests/fixtures/multiple-composefiles/docker-compose.yml6
-rw-r--r--tests/fixtures/net-container/docker-compose.yml7
-rw-r--r--tests/fixtures/net-container/v2-invalid.yml10
-rw-r--r--tests/fixtures/networks/bridge.yml9
-rw-r--r--tests/fixtures/networks/default-network-config.yml13
-rw-r--r--tests/fixtures/networks/docker-compose.yml21
-rw-r--r--tests/fixtures/networks/external-default.yml12
-rw-r--r--tests/fixtures/networks/external-networks.yml16
-rw-r--r--tests/fixtures/networks/missing-network.yml10
-rw-r--r--tests/fixtures/networks/network-aliases.yml16
-rwxr-xr-xtests/fixtures/networks/network-internal.yml13
-rw-r--r--tests/fixtures/networks/network-label.yml13
-rw-r--r--tests/fixtures/networks/network-mode.yml27
-rwxr-xr-xtests/fixtures/networks/network-static-addresses.yml23
-rw-r--r--tests/fixtures/no-composefile/.gitignore0
-rw-r--r--tests/fixtures/no-links-composefile/docker-compose.yml9
-rw-r--r--tests/fixtures/no-services/docker-compose.yml5
-rw-r--r--tests/fixtures/override-files/docker-compose.override.yml7
-rw-r--r--tests/fixtures/override-files/docker-compose.yml10
-rw-r--r--tests/fixtures/override-files/extra.yml10
-rw-r--r--tests/fixtures/override-yaml-files/docker-compose.override.yaml3
-rw-r--r--tests/fixtures/override-yaml-files/docker-compose.yml10
-rw-r--r--tests/fixtures/pid-mode/docker-compose.yml17
-rw-r--r--tests/fixtures/ports-composefile-scale/docker-compose.yml6
-rw-r--r--tests/fixtures/ports-composefile/docker-compose.yml8
-rw-r--r--tests/fixtures/ports-composefile/expanded-notation.yml15
-rw-r--r--tests/fixtures/restart/docker-compose.yml17
-rw-r--r--tests/fixtures/run-workdir/docker-compose.yml4
-rw-r--r--tests/fixtures/scale/docker-compose.yml9
-rw-r--r--tests/fixtures/secrets/default1
-rw-r--r--tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml9
-rw-r--r--tests/fixtures/simple-composefile-volume-ready/docker-compose.yml2
-rw-r--r--tests/fixtures/simple-composefile-volume-ready/files/example.txt1
-rw-r--r--tests/fixtures/simple-composefile/digest.yml6
-rw-r--r--tests/fixtures/simple-composefile/docker-compose.yml6
-rw-r--r--tests/fixtures/simple-composefile/ignore-pull-failures.yml6
-rw-r--r--tests/fixtures/simple-dockerfile/Dockerfile3
-rw-r--r--tests/fixtures/simple-dockerfile/docker-compose.yml2
-rw-r--r--tests/fixtures/simple-failing-dockerfile/Dockerfile7
-rw-r--r--tests/fixtures/simple-failing-dockerfile/docker-compose.yml2
-rw-r--r--tests/fixtures/sleeps-composefile/docker-compose.yml10
-rw-r--r--tests/fixtures/stop-signal-composefile/docker-compose.yml10
-rw-r--r--tests/fixtures/tls/ca.pem0
-rw-r--r--tests/fixtures/tls/cert.pem0
-rw-r--r--tests/fixtures/tls/key.key0
-rw-r--r--tests/fixtures/top/docker-compose.yml6
-rw-r--r--tests/fixtures/unicode-environment/docker-compose.yml7
-rw-r--r--tests/fixtures/user-composefile/docker-compose.yml4
-rw-r--r--tests/fixtures/v1-config/docker-compose.yml10
-rw-r--r--tests/fixtures/v2-dependencies/docker-compose.yml13
-rw-r--r--tests/fixtures/v2-full/Dockerfile4
-rw-r--r--tests/fixtures/v2-full/docker-compose.yml24
-rw-r--r--tests/fixtures/v2-simple/docker-compose.yml8
-rw-r--r--tests/fixtures/v2-simple/links-invalid.yml10
-rw-r--r--tests/fixtures/v3-full/docker-compose.yml57
-rw-r--r--tests/fixtures/volume-path-interpolation/docker-compose.yml5
-rw-r--r--tests/fixtures/volume-path/common/services.yml5
-rw-r--r--tests/fixtures/volume-path/docker-compose.yml6
-rw-r--r--tests/fixtures/volume/docker-compose.yml11
-rw-r--r--tests/fixtures/volumes-from-container/docker-compose.yml5
-rw-r--r--tests/fixtures/volumes/docker-compose.yml2
-rw-r--r--tests/fixtures/volumes/external-volumes-v2-x.yml17
-rw-r--r--tests/fixtures/volumes/external-volumes-v2.yml16
-rw-r--r--tests/fixtures/volumes/external-volumes-v3-4.yml17
-rw-r--r--tests/fixtures/volumes/external-volumes-v3-x.yml16
-rw-r--r--tests/fixtures/volumes/volume-label.yml13
-rw-r--r--tests/helpers.py50
-rw-r--r--tests/integration/__init__.py0
-rw-r--r--tests/integration/network_test.py17
-rw-r--r--tests/integration/project_test.py1636
-rw-r--r--tests/integration/resilience_test.py57
-rw-r--r--tests/integration/service_test.py1380
-rw-r--r--tests/integration/state_test.py308
-rw-r--r--tests/integration/testcases.py187
-rw-r--r--tests/integration/volume_test.py126
-rw-r--r--tests/unit/__init__.py0
-rw-r--r--tests/unit/bundle_test.py222
-rw-r--r--tests/unit/cli/__init__.py0
-rw-r--r--tests/unit/cli/command_test.py76
-rw-r--r--tests/unit/cli/docker_client_test.py187
-rw-r--r--tests/unit/cli/errors_test.py88
-rw-r--r--tests/unit/cli/formatter_test.py53
-rw-r--r--tests/unit/cli/log_printer_test.py201
-rw-r--r--tests/unit/cli/main_test.py104
-rw-r--r--tests/unit/cli/utils_test.py23
-rw-r--r--tests/unit/cli/verbose_proxy_test.py33
-rw-r--r--tests/unit/cli_test.py214
-rw-r--r--tests/unit/config/__init__.py0
-rw-r--r--tests/unit/config/config_test.py4482
-rw-r--r--tests/unit/config/environment_test.py40
-rw-r--r--tests/unit/config/interpolation_test.py148
-rw-r--r--tests/unit/config/sort_services_test.py243
-rw-r--r--tests/unit/config/types_test.py235
-rw-r--r--tests/unit/container_test.py198
-rw-r--r--tests/unit/network_test.py161
-rw-r--r--tests/unit/parallel_test.py163
-rw-r--r--tests/unit/progress_stream_test.py87
-rw-r--r--tests/unit/project_test.py570
-rw-r--r--tests/unit/service_test.py1146
-rw-r--r--tests/unit/split_buffer_test.py54
-rw-r--r--tests/unit/timeparse_test.py56
-rw-r--r--tests/unit/utils_test.py70
-rw-r--r--tests/unit/volume_test.py26
170 files changed, 16149 insertions, 0 deletions
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..1ac1b21c
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,14 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import sys
+
+if sys.version_info >= (2, 7):
+ import unittest # NOQA
+else:
+ import unittest2 as unittest # NOQA
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock # NOQA
diff --git a/tests/acceptance/__init__.py b/tests/acceptance/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/acceptance/__init__.py
diff --git a/tests/acceptance/cli_test.py b/tests/acceptance/cli_test.py
new file mode 100644
index 00000000..bba2238e
--- /dev/null
+++ b/tests/acceptance/cli_test.py
@@ -0,0 +1,2390 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import datetime
+import json
+import os
+import os.path
+import re
+import signal
+import subprocess
+import time
+from collections import Counter
+from collections import namedtuple
+from operator import attrgetter
+
+import pytest
+import six
+import yaml
+from docker import errors
+
+from .. import mock
+from ..helpers import create_host_file
+from compose.cli.command import get_project
+from compose.config.errors import DuplicateOverrideFileFound
+from compose.container import Container
+from compose.project import OneOffFilter
+from compose.utils import nanoseconds_from_time_seconds
+from tests.integration.testcases import DockerClientTestCase
+from tests.integration.testcases import get_links
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import pull_busybox
+from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
+
+
+BUILD_CACHE_TEXT = 'Using cache'
+BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
+
+
+def start_process(base_dir, options):
+ proc = subprocess.Popen(
+ ['docker-compose'] + options,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=base_dir)
+ print("Running process: %s" % proc.pid)
+ return proc
+
+
+def wait_on_process(proc, returncode=0):
+ stdout, stderr = proc.communicate()
+ if proc.returncode != returncode:
+ print("Stderr: {}".format(stderr))
+ print("Stdout: {}".format(stdout))
+ assert proc.returncode == returncode
+ return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
+
+
+def wait_on_condition(condition, delay=0.1, timeout=40):
+ start_time = time.time()
+ while not condition():
+ if time.time() - start_time > timeout:
+ raise AssertionError("Timeout: %s" % condition)
+ time.sleep(delay)
+
+
+def kill_service(service):
+ for container in service.containers():
+ if container.is_running:
+ container.kill()
+
+
+class ContainerCountCondition(object):
+
+ def __init__(self, project, expected):
+ self.project = project
+ self.expected = expected
+
+ def __call__(self):
+ return len([c for c in self.project.containers() if c.is_running]) == self.expected
+
+ def __str__(self):
+ return "waiting for counter count == %s" % self.expected
+
+
+class ContainerStateCondition(object):
+
+ def __init__(self, client, name, status):
+ self.client = client
+ self.name = name
+ self.status = status
+
+ def __call__(self):
+ try:
+ container = self.client.inspect_container(self.name)
+ return container['State']['Status'] == self.status
+ except errors.APIError:
+ return False
+
+ def __str__(self):
+ return "waiting for container to be %s" % self.status
+
+
+class CLITestCase(DockerClientTestCase):
+
+ def setUp(self):
+ super(CLITestCase, self).setUp()
+ self.base_dir = 'tests/fixtures/simple-composefile'
+ self.override_dir = None
+
+ def tearDown(self):
+ if self.base_dir:
+ self.project.kill()
+ self.project.down(None, True)
+
+ for container in self.project.containers(stopped=True, one_off=OneOffFilter.only):
+ container.remove(force=True)
+ networks = self.client.networks()
+ for n in networks:
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
+ self.client.remove_network(n['Name'])
+ volumes = self.client.volumes().get('Volumes') or []
+ for v in volumes:
+ if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
+ self.client.remove_volume(v['Name'])
+ if hasattr(self, '_project'):
+ del self._project
+
+ super(CLITestCase, self).tearDown()
+
+ @property
+ def project(self):
+ # Hack: allow project to be overridden
+ if not hasattr(self, '_project'):
+ self._project = get_project(self.base_dir, override_dir=self.override_dir)
+ return self._project
+
+ def dispatch(self, options, project_options=None, returncode=0):
+ project_options = project_options or []
+ proc = start_process(self.base_dir, project_options + options)
+ return wait_on_process(proc, returncode=returncode)
+
+ def execute(self, container, cmd):
+ # Remove once Hijack and CloseNotifier sign a peace treaty
+ self.client.close()
+ exc = self.client.exec_create(container.id, cmd)
+ self.client.exec_start(exc)
+ return self.client.exec_inspect(exc)['ExitCode']
+
+ def lookup(self, container, hostname):
+ return self.execute(container, ["nslookup", hostname]) == 0
+
+ def test_help(self):
+ self.base_dir = 'tests/fixtures/no-composefile'
+ result = self.dispatch(['help', 'up'], returncode=0)
+ assert 'Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]' in result.stdout
+ # Prevent tearDown from trying to create a project
+ self.base_dir = None
+
+ def test_help_nonexistent(self):
+ self.base_dir = 'tests/fixtures/no-composefile'
+ result = self.dispatch(['help', 'foobar'], returncode=1)
+ assert 'No such command' in result.stderr
+ self.base_dir = None
+
+ def test_shorthand_host_opt(self):
+ self.dispatch(
+ ['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
+ 'up', '-d'],
+ returncode=0
+ )
+
+ def test_host_not_reachable(self):
+ result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
+ assert "Couldn't connect to Docker daemon" in result.stderr
+
+ def test_host_not_reachable_volumes_from_container(self):
+ self.base_dir = 'tests/fixtures/volumes-from-container'
+
+ container = self.client.create_container(
+ 'busybox', 'true', name='composetest_data_container',
+ host_config={}
+ )
+ self.addCleanup(self.client.remove_container, container)
+
+ result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
+ assert "Couldn't connect to Docker daemon" in result.stderr
+
+ def test_config_list_services(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config', '--services'])
+ assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}
+
+ def test_config_list_volumes(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config', '--volumes'])
+ assert set(result.stdout.rstrip().split('\n')) == {'data'}
+
+ def test_config_quiet_with_error(self):
+ self.base_dir = None
+ result = self.dispatch([
+ '-f', 'tests/fixtures/invalid-composefile/invalid.yml',
+ 'config', '-q'
+ ], returncode=1)
+ assert "'notaservice' must be a mapping" in result.stderr
+
+ def test_config_quiet(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ assert self.dispatch(['config', '-q']).stdout == ''
+
+ def test_config_default(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config'])
+ # assert there are no python objects encoded in the output
+ assert '!!' not in result.stdout
+
+ output = yaml.load(result.stdout)
+ expected = {
+ 'version': '2.0',
+ 'volumes': {'data': {'driver': 'local'}},
+ 'networks': {'front': {}},
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': os.path.abspath(self.base_dir),
+ },
+ 'networks': {'front': None, 'default': None},
+ 'volumes_from': ['service:other:rw'],
+ },
+ 'other': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'volumes': ['/data'],
+ },
+ },
+ }
+ assert output == expected
+
+ def test_config_restart(self):
+ self.base_dir = 'tests/fixtures/restart'
+ result = self.dispatch(['config'])
+ assert yaml.load(result.stdout) == {
+ 'version': '2.0',
+ 'services': {
+ 'never': {
+ 'image': 'busybox',
+ 'restart': 'no',
+ },
+ 'always': {
+ 'image': 'busybox',
+ 'restart': 'always',
+ },
+ 'on-failure': {
+ 'image': 'busybox',
+ 'restart': 'on-failure',
+ },
+ 'on-failure-5': {
+ 'image': 'busybox',
+ 'restart': 'on-failure:5',
+ },
+ 'restart-null': {
+ 'image': 'busybox',
+ 'restart': ''
+ },
+ },
+ }
+
+ def test_config_external_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+ result = self.dispatch(['-f', 'external-networks.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'networks' in json_result
+ assert json_result['networks'] == {
+ 'networks_foo': {
+ 'external': True # {'name': 'networks_foo'}
+ },
+ 'bar': {
+ 'external': {'name': 'networks_bar'}
+ }
+ }
+
+ def test_config_external_volume_v2(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ },
+ 'bar': {
+ 'external': {
+ 'name': 'some_bar',
+ },
+ }
+ }
+
+ def test_config_external_volume_v2_x(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ }
+ }
+
+ def test_config_external_volume_v3_x(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ },
+ 'bar': {
+ 'external': {
+ 'name': 'some_bar',
+ },
+ }
+ }
+
+ def test_config_external_volume_v3_4(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ }
+ }
+
+ def test_config_v1(self):
+ self.base_dir = 'tests/fixtures/v1-config'
+ result = self.dispatch(['config'])
+ assert yaml.load(result.stdout) == {
+ 'version': '2.1',
+ 'services': {
+ 'net': {
+ 'image': 'busybox',
+ 'network_mode': 'bridge',
+ },
+ 'volume': {
+ 'image': 'busybox',
+ 'volumes': ['/data'],
+ 'network_mode': 'bridge',
+ },
+ 'app': {
+ 'image': 'busybox',
+ 'volumes_from': ['service:volume:rw'],
+ 'network_mode': 'service:net',
+ },
+ },
+ }
+
+ @v3_only()
+ def test_config_v3(self):
+ self.base_dir = 'tests/fixtures/v3-full'
+ result = self.dispatch(['config'])
+
+ assert yaml.load(result.stdout) == {
+ 'version': '3.2',
+ 'volumes': {
+ 'foobar': {
+ 'labels': {
+ 'com.docker.compose.test': 'true',
+ },
+ },
+ },
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'deploy': {
+ 'mode': 'replicated',
+ 'replicas': 6,
+ 'labels': ['FOO=BAR'],
+ 'update_config': {
+ 'parallelism': 3,
+ 'delay': '10s',
+ 'failure_action': 'continue',
+ 'monitor': '60s',
+ 'max_failure_ratio': 0.3,
+ },
+ 'resources': {
+ 'limits': {
+ 'cpus': '0.001',
+ 'memory': '50M',
+ },
+ 'reservations': {
+ 'cpus': '0.0001',
+ 'memory': '20M',
+ },
+ },
+ 'restart_policy': {
+ 'condition': 'on_failure',
+ 'delay': '5s',
+ 'max_attempts': 3,
+ 'window': '120s',
+ },
+ 'placement': {
+ 'constraints': ['node=foo'],
+ },
+ },
+
+ 'healthcheck': {
+ 'test': 'cat /etc/passwd',
+ 'interval': '10s',
+ 'timeout': '1s',
+ 'retries': 5,
+ },
+ 'volumes': [
+ '/host/path:/container/path:ro',
+ 'foobar:/container/volumepath:rw',
+ '/anonymous',
+ 'foobar:/container/volumepath2:nocopy'
+ ],
+
+ 'stop_grace_period': '20s',
+ },
+ },
+ }
+
+ def test_ps(self):
+ self.project.get_service('simple').create_container()
+ result = self.dispatch(['ps'])
+ assert 'simplecomposefile_simple_1' in result.stdout
+
+ def test_ps_default_composefile(self):
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['ps'])
+
+ self.assertIn('multiplecomposefiles_simple_1', result.stdout)
+ self.assertIn('multiplecomposefiles_another_1', result.stdout)
+ self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
+
+ def test_ps_alternate_composefile(self):
+ config_path = os.path.abspath(
+ 'tests/fixtures/multiple-composefiles/compose2.yml')
+ self._project = get_project(self.base_dir, [config_path])
+
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
+ result = self.dispatch(['-f', 'compose2.yml', 'ps'])
+
+ self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
+ self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
+ self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
+
+ def test_pull(self):
+ result = self.dispatch(['pull'])
+ assert sorted(result.stderr.split('\n'))[1:] == [
+ 'Pulling another (busybox:latest)...',
+ 'Pulling simple (busybox:latest)...',
+ ]
+
+ def test_pull_with_digest(self):
+ result = self.dispatch(['-f', 'digest.yml', 'pull'])
+
+ assert 'Pulling simple (busybox:latest)...' in result.stderr
+ assert ('Pulling digest (busybox@'
+ 'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
+ '04ee8502d)...') in result.stderr
+
+ def test_pull_with_ignore_pull_failures(self):
+ result = self.dispatch([
+ '-f', 'ignore-pull-failures.yml',
+ 'pull', '--ignore-pull-failures']
+ )
+
+ assert 'Pulling simple (busybox:latest)...' in result.stderr
+ assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
+ assert ('repository nonexisting-image not found' in result.stderr or
+ 'image library/nonexisting-image:latest not found' in result.stderr or
+ 'pull access denied for nonexisting-image' in result.stderr)
+
+ def test_pull_with_parallel_failure(self):
+ result = self.dispatch([
+ '-f', 'ignore-pull-failures.yml', 'pull', '--parallel'],
+ returncode=1
+ )
+
+ self.assertRegexpMatches(result.stderr, re.compile('^Pulling simple', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr, re.compile('^Pulling another', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr,
+ re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr,
+ re.compile('''^(ERROR: )?(b')?.* nonexisting-image''',
+ re.MULTILINE))
+
+ def test_pull_with_quiet(self):
+ assert self.dispatch(['pull', '--quiet']).stderr == ''
+ assert self.dispatch(['pull', '--quiet']).stdout == ''
+
+ def test_build_plain(self):
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'])
+
+ result = self.dispatch(['build', 'simple'])
+ assert BUILD_PULL_TEXT not in result.stdout
+
+ def test_build_no_cache(self):
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'])
+
+ result = self.dispatch(['build', '--no-cache', 'simple'])
+ assert BUILD_CACHE_TEXT not in result.stdout
+ assert BUILD_PULL_TEXT not in result.stdout
+
+ def test_build_pull(self):
+ # Make sure we have the latest busybox already
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'], None)
+
+ result = self.dispatch(['build', '--pull', 'simple'])
+ if not is_cluster(self.client):
+ # If previous build happened on another node, cache won't be available
+ assert BUILD_CACHE_TEXT in result.stdout
+ assert BUILD_PULL_TEXT in result.stdout
+
+ def test_build_no_cache_pull(self):
+ # Make sure we have the latest busybox already
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'])
+
+ result = self.dispatch(['build', '--no-cache', '--pull', 'simple'])
+ assert BUILD_CACHE_TEXT not in result.stdout
+ assert BUILD_PULL_TEXT in result.stdout
+
+ @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
+ def test_build_failed(self):
+ self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
+ self.dispatch(['build', 'simple'], returncode=1)
+
+ labels = ["com.docker.compose.test_failing_image=true"]
+ containers = [
+ Container.from_ps(self.project.client, c)
+ for c in self.project.client.containers(
+ all=True,
+ filters={"label": labels})
+ ]
+ assert len(containers) == 1
+
+ @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
+ def test_build_failed_forcerm(self):
+ self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
+ self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
+
+ labels = ["com.docker.compose.test_failing_image=true"]
+
+ containers = [
+ Container.from_ps(self.project.client, c)
+ for c in self.project.client.containers(
+ all=True,
+ filters={"label": labels})
+ ]
+ assert not containers
+
+ def test_build_shm_size_build_option(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-shm-size'
+ result = self.dispatch(['build', '--no-cache'], None)
+ assert 'shm_size: 96' in result.stdout
+
+ def test_bundle_with_digests(self):
+ self.base_dir = 'tests/fixtures/bundle-with-digests/'
+ tmpdir = pytest.ensuretemp('cli_test_bundle')
+ self.addCleanup(tmpdir.remove)
+ filename = str(tmpdir.join('example.dab'))
+
+ self.dispatch(['bundle', '--output', filename])
+ with open(filename, 'r') as fh:
+ bundle = json.load(fh)
+
+ assert bundle == {
+ 'Version': '0.1',
+ 'Services': {
+ 'web': {
+ 'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
+ '44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
+ 'Networks': ['default'],
+ },
+ 'redis': {
+ 'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
+ '374b2b7392de1e7d77be26ef8f7b'),
+ 'Networks': ['default'],
+ }
+ },
+ }
+
+ def test_build_override_dir(self):
+ self.base_dir = 'tests/fixtures/build-path-override-dir'
+ self.override_dir = os.path.abspath('tests/fixtures')
+ result = self.dispatch([
+ '--project-directory', self.override_dir,
+ 'build'])
+
+ assert 'Successfully built' in result.stdout
+
+ def test_build_override_dir_invalid_path(self):
+ config_path = os.path.abspath('tests/fixtures/build-path-override-dir/docker-compose.yml')
+ result = self.dispatch([
+ '-f', config_path,
+ 'build'], returncode=1)
+
+ assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr
+
+ def test_create(self):
+ self.dispatch(['create'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ service_containers = service.containers(stopped=True)
+ another_containers = another.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert len(another_containers) == 1
+ assert not service_containers[0].is_running
+ assert not another_containers[0].is_running
+
+ def test_create_with_force_recreate(self):
+ self.dispatch(['create'], None)
+ service = self.project.get_service('simple')
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ old_ids = [c.id for c in service.containers(stopped=True)]
+
+ self.dispatch(['create', '--force-recreate'], None)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ new_ids = [c.id for c in service_containers]
+
+ assert old_ids != new_ids
+
+ def test_create_with_no_recreate(self):
+ self.dispatch(['create'], None)
+ service = self.project.get_service('simple')
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ old_ids = [c.id for c in service.containers(stopped=True)]
+
+ self.dispatch(['create', '--no-recreate'], None)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ new_ids = [c.id for c in service_containers]
+
+ assert old_ids == new_ids
+
+ def test_run_one_off_with_volume(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ service = self.project.get_service('simple')
+ container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ mount = container_data.get('Mounts')[0]
+ assert mount['Source'] == volume_path
+ assert mount['Destination'] == '/data'
+ assert mount['Type'] == 'bind'
+
+ def test_run_one_off_with_multiple_volumes(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-v', '{}:/data1'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-v', '{}:/data1'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f' '/data1/example.txt'
+ ], returncode=0)
+
+ def test_run_one_off_with_volume_merge(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ '-f', 'docker-compose.merge.yml',
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ service = self.project.get_service('simple')
+ container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ mounts = container_data.get('Mounts')
+ assert len(mounts) == 2
+ config_mount = [m for m in mounts if m['Destination'] == '/data1'][0]
+ override_mount = [m for m in mounts if m['Destination'] == '/data'][0]
+
+ assert config_mount['Type'] == 'volume'
+ assert override_mount['Source'] == volume_path
+ assert override_mount['Type'] == 'bind'
+
    def test_create_with_force_recreate_and_no_recreate(self):
        """`create` rejects the mutually exclusive --force-recreate/--no-recreate pair."""
        self.dispatch(
            ['create', '--force-recreate', '--no-recreate'],
            returncode=1)
+
    def test_down_invalid_rmi_flag(self):
        """`down --rmi` with an unknown value fails with a usage error on stderr."""
        result = self.dispatch(['down', '--rmi', 'bogus'], returncode=1)
        assert '--rmi flag must be' in result.stderr
+
    @v2_only()
    def test_down(self):
        """`down --rmi=local --volumes` removes containers (incl. one-offs),
        locally-built images, named volumes and project networks -- but never
        base images such as busybox.
        """
        self.base_dir = 'tests/fixtures/v2-full'

        self.dispatch(['up', '-d'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        # Create two one-off containers (one stopped, one running).
        self.dispatch(['run', 'web', 'true'])
        self.dispatch(['run', '-d', 'web', 'tail', '-f', '/dev/null'])
        assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2

        result = self.dispatch(['down', '--rmi=local', '--volumes'])
        # Progress messages are written to stderr.
        assert 'Stopping v2full_web_1' in result.stderr
        assert 'Stopping v2full_other_1' in result.stderr
        assert 'Stopping v2full_web_run_2' in result.stderr
        assert 'Removing v2full_web_1' in result.stderr
        assert 'Removing v2full_other_1' in result.stderr
        assert 'Removing v2full_web_run_1' in result.stderr
        assert 'Removing v2full_web_run_2' in result.stderr
        assert 'Removing volume v2full_data' in result.stderr
        assert 'Removing image v2full_web' in result.stderr
        assert 'Removing image busybox' not in result.stderr
        assert 'Removing network v2full_default' in result.stderr
        assert 'Removing network v2full_front' in result.stderr
+
+ def test_up_detached(self):
+ self.dispatch(['up', '-d'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertEqual(len(another.containers()), 1)
+
+ # Ensure containers don't have stdin and stdout connected in -d mode
+ container, = service.containers()
+ self.assertFalse(container.get('Config.AttachStderr'))
+ self.assertFalse(container.get('Config.AttachStdout'))
+ self.assertFalse(container.get('Config.AttachStdin'))
+
    def test_up_attached(self):
        """Attached `up` multiplexes service output with per-container prefixes."""
        self.base_dir = 'tests/fixtures/echo-services'
        result = self.dispatch(['up', '--no-color'])

        assert 'simple_1 | simple' in result.stdout
        assert 'another_1 | another' in result.stdout
        assert 'simple_1 exited with code 0' in result.stdout
        assert 'another_1 exited with code 0' in result.stdout
+
+ @v2_only()
+ def test_up(self):
+ self.base_dir = 'tests/fixtures/v2-simple'
+ self.dispatch(['up', '-d'], None)
+
+ services = self.project.get_services()
+
+ network_name = self.project.networks.networks['default'].full_name
+ networks = self.client.networks(names=[network_name])
+ self.assertEqual(len(networks), 1)
+ assert networks[0]['Driver'] == 'bridge' if not is_cluster(self.client) else 'overlay'
+ assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
+
+ network = self.client.inspect_network(networks[0]['Id'])
+
+ for service in services:
+ containers = service.containers()
+ self.assertEqual(len(containers), 1)
+
+ container = containers[0]
+ self.assertIn(container.id, network['Containers'])
+
+ networks = container.get('NetworkSettings.Networks')
+ self.assertEqual(list(networks), [network['Name']])
+
+ self.assertEqual(
+ sorted(networks[network['Name']]['Aliases']),
+ sorted([service.name, container.short_id]))
+
+ for service in services:
+ assert self.lookup(container, service.name)
+
    @v2_only()
    def test_up_no_start(self):
        """`up --no-start` creates networks, volumes and containers but starts nothing."""
        self.base_dir = 'tests/fixtures/v2-full'
        self.dispatch(['up', '--no-start'], None)

        services = self.project.get_services()

        default_network = self.project.networks.networks['default'].full_name
        front_network = self.project.networks.networks['front'].full_name
        networks = self.client.networks(names=[default_network, front_network])
        assert len(networks) == 2

        for service in services:
            containers = service.containers(stopped=True)
            assert len(containers) == 1

            container = containers[0]
            assert not container.is_running
            assert container.get('State.Status') == 'created'

        volumes = self.project.volumes.volumes
        assert 'data' in volumes
        volume = volumes['data']

        # The code below is a Swarm-compatible equivalent to volume.exists():
        # Swarm prefixes volume names with the node name, hence the split('/').
        remote_volumes = [
            v for v in self.client.volumes().get('Volumes', [])
            if v['Name'].split('/')[-1] == volume.full_name
        ]
        assert len(remote_volumes) > 0
+
    @v2_only()
    def test_up_no_ansi(self):
        """--no-ansi suppresses ANSI control sequences in the output."""
        self.base_dir = 'tests/fixtures/v2-simple'
        result = self.dispatch(['--no-ansi', 'up', '-d'], None)
        # chr(27) is ESC; these are erase-line and cursor up/down sequences.
        assert "%c[2K\r" % 27 not in result.stderr
        assert "%c[1A" % 27 not in result.stderr
        assert "%c[1B" % 27 not in result.stderr
+
    @v2_only()
    def test_up_with_default_network_config(self):
        """Driver options on the default network are passed through to the engine."""
        filename = 'default-network-config.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], None)

        network_name = self.project.networks.networks['default'].full_name
        networks = self.client.networks(names=[network_name])

        # The fixture sets enable_icc to 'false'; it must survive to the engine.
        assert networks[0]['Options']['com.docker.network.bridge.enable_icc'] == 'false'
+
+ @v2_only()
+ def test_up_with_network_aliases(self):
+ filename = 'network-aliases.yml'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+ back_name = '{}_back'.format(self.project.name)
+ front_name = '{}_front'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # Two networks were created: back and front
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
+ web_container = self.project.get_service('web').containers()[0]
+
+ back_aliases = web_container.get(
+ 'NetworkSettings.Networks.{}.Aliases'.format(back_name)
+ )
+ assert 'web' in back_aliases
+ front_aliases = web_container.get(
+ 'NetworkSettings.Networks.{}.Aliases'.format(front_name)
+ )
+ assert 'web' in front_aliases
+ assert 'forward_facing' in front_aliases
+ assert 'ahead' in front_aliases
+
+ @v2_only()
+ def test_up_with_network_internal(self):
+ self.require_api_version('1.23')
+ filename = 'network-internal.yml'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+ internal_net = '{}_internal'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # One network was created: internal
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [internal_net]
+
+ assert networks[0]['Internal'] is True
+
    @v2_only()
    def test_up_with_network_static_addresses(self):
        """Static IPv4/IPv6 addresses from the compose file land in IPAMConfig."""
        filename = 'network-static-addresses.yml'
        ipv4_address = '172.16.100.100'
        ipv6_address = 'fe80::1001:100'
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['-f', filename, 'up', '-d'], None)
        static_net = '{}_static_test'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        # Only one network was created: static_test
        assert sorted(n['Name'].split('/')[-1] for n in networks) == [static_net]
        web_container = self.project.get_service('web').containers()[0]

        ipam_config = web_container.get(
            'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net)
        )
        assert ipv4_address in ipam_config.values()
        assert ipv6_address in ipam_config.values()
+
    @v2_only()
    def test_up_with_networks(self):
        """Containers join exactly the networks listed for their service, with
        link aliases applied and cross-network isolation enforced.
        """
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['up', '-d'], None)

        back_name = '{}_back'.format(self.project.name)
        front_name = '{}_front'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        # Two networks were created: back and front
        assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]

        # lookup by ID instead of name in case of duplicates
        back_network = self.client.inspect_network(
            [n for n in networks if n['Name'] == back_name][0]['Id']
        )
        front_network = self.client.inspect_network(
            [n for n in networks if n['Name'] == front_name][0]['Id']
        )

        web_container = self.project.get_service('web').containers()[0]
        app_container = self.project.get_service('app').containers()[0]
        db_container = self.project.get_service('db').containers()[0]

        for net_name in [front_name, back_name]:
            links = app_container.get('NetworkSettings.Networks.{}.Links'.format(net_name))
            assert '{}:database'.format(db_container.name) in links

        # db and app joined the back network
        assert sorted(back_network['Containers']) == sorted([db_container.id, app_container.id])

        # web and app joined the front network
        assert sorted(front_network['Containers']) == sorted([web_container.id, app_container.id])

        # web can see app but not db
        assert self.lookup(web_container, "app")
        assert not self.lookup(web_container, "db")

        # app can see db
        assert self.lookup(app_container, "db")

        # app has aliased db to "database"
        assert self.lookup(app_container, "database")
+
    @v2_only()
    def test_up_missing_network(self):
        """Referencing a network that the file never defines is a config error."""
        self.base_dir = 'tests/fixtures/networks'

        result = self.dispatch(
            ['-f', 'missing-network.yml', 'up', '-d'],
            returncode=1)

        assert 'Service "web" uses an undefined network "foo"' in result.stderr
+
    @v2_only()
    @no_cluster('container networks not supported in Swarm')
    def test_up_with_network_mode(self):
        """network_mode (bridge/host/none/service:/container:) maps to
        HostConfig.NetworkMode and suppresses project network creation.
        """
        c = self.client.create_container(
            'busybox', 'top', name='composetest_network_mode_container',
            host_config={}
        )
        self.addCleanup(self.client.remove_container, c, force=True)
        self.client.start(c)
        container_mode_source = 'container:{}'.format(c['Id'])

        filename = 'network-mode.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], None)

        # No project-scoped networks are created when every service sets a mode.
        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]
        assert not networks

        for name in ['bridge', 'host', 'none']:
            container = self.project.get_service(name).containers()[0]
            assert list(container.get('NetworkSettings.Networks')) == [name]
            assert container.get('HostConfig.NetworkMode') == name

        # `network_mode: service:bridge` resolves to the bridge service's container.
        service_mode_source = 'container:{}'.format(
            self.project.get_service('bridge').containers()[0].id)
        service_mode_container = self.project.get_service('service').containers()[0]
        assert not service_mode_container.get('NetworkSettings.Networks')
        assert service_mode_container.get('HostConfig.NetworkMode') == service_mode_source

        container_mode_container = self.project.get_service('container').containers()[0]
        assert not container_mode_container.get('NetworkSettings.Networks')
        assert container_mode_container.get('HostConfig.NetworkMode') == container_mode_source
+
    @v2_only()
    def test_up_external_networks(self):
        """External networks are never created by compose; `up` fails until they exist."""
        filename = 'external-networks.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
        assert 'declared as external, but could not be found' in result.stderr

        networks = [
            n['Name'] for n in self.client.networks()
            if n['Name'].startswith('{}_'.format(self.project.name))
        ]
        assert not networks

        # Pre-create the externals; a second `up` must then succeed and join them.
        network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']]
        for name in network_names:
            self.client.create_network(name, attachable=True)

        self.dispatch(['-f', filename, 'up', '-d'])
        container = self.project.containers()[0]
        assert sorted(list(container.get('NetworkSettings.Networks'))) == sorted(network_names)
+
    @v2_only()
    def test_up_with_external_default_network(self):
        """An external default network must pre-exist; compose then joins it."""
        filename = 'external-default.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
        assert 'declared as external, but could not be found' in result.stderr

        networks = [
            n['Name'] for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]
        assert not networks

        network_name = 'composetest_external_network'
        self.client.create_network(network_name, attachable=True)

        self.dispatch(['-f', filename, 'up', '-d'])
        container = self.project.containers()[0]
        assert list(container.get('NetworkSettings.Networks')) == [network_name]
+
    @v2_1_only()
    def test_up_with_network_labels(self):
        """Labels declared on a network reach the engine at creation time."""
        filename = 'network-label.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], returncode=0)

        network_with_label = '{}_network_with_label'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        assert [n['Name'].split('/')[-1] for n in networks] == [network_with_label]
        assert 'label_key' in networks[0]['Labels']
        assert networks[0]['Labels']['label_key'] == 'label_val'
+
    @v2_1_only()
    def test_up_with_volume_labels(self):
        """Labels declared on a volume reach the engine at creation time."""
        filename = 'volume-label.yml'

        self.base_dir = 'tests/fixtures/volumes'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], returncode=0)

        volume_with_label = '{}_volume_with_label'.format(self.project.name)

        volumes = [
            v for v in self.client.volumes().get('Volumes', [])
            if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
        assert 'label_key' in volumes[0]['Labels']
        assert volumes[0]['Labels']['label_key'] == 'label_val'
+
+ @v2_only()
+ def test_up_no_services(self):
+ self.base_dir = 'tests/fixtures/no-services'
+ self.dispatch(['up', '-d'], None)
+
+ network_names = [
+ n['Name'] for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+ assert network_names == []
+
+ def test_up_with_links_v1(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'web'], None)
+
+ # No network was created
+ network_name = self.project.networks.networks['default'].full_name
+ networks = self.client.networks(names=[network_name])
+ assert networks == []
+
+ web = self.project.get_service('web')
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+
+ # console was not started
+ self.assertEqual(len(web.containers()), 1)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ # web has links
+ web_container = web.containers()[0]
+ self.assertTrue(web_container.get('HostConfig.Links'))
+
    def test_up_with_net_is_invalid(self):
        """The v1-only `net:` option is rejected in a v2 file."""
        self.base_dir = 'tests/fixtures/net-container'

        result = self.dispatch(
            ['-f', 'v2-invalid.yml', 'up', '-d'],
            returncode=1)

        assert "Unsupported config option for services.bar: 'net'" in result.stderr
+
+ @no_cluster("Legacy networking not supported on Swarm")
+ def test_up_with_net_v1(self):
+ self.base_dir = 'tests/fixtures/net-container'
+ self.dispatch(['up', '-d'], None)
+
+ bar = self.project.get_service('bar')
+ bar_container = bar.containers()[0]
+
+ foo = self.project.get_service('foo')
+ foo_container = foo.containers()[0]
+
+ assert foo_container.get('HostConfig.NetworkMode') == \
+ 'container:{}'.format(bar_container.id)
+
    @v3_only()
    def test_up_with_healthcheck(self):
        """healthcheck settings map to Config.Healthcheck (durations converted
        to nanoseconds); `disable: true` becomes the NONE test.
        """
        def wait_on_health_status(container, status):
            # Re-inspect on each poll so the health state is fresh.
            def condition():
                container.inspect()
                return container.get('State.Health.Status') == status

            return wait_on_condition(condition, delay=0.5)

        self.base_dir = 'tests/fixtures/healthcheck'
        self.dispatch(['up', '-d'], None)

        passes = self.project.get_service('passes')
        passes_container = passes.containers()[0]

        assert passes_container.get('Config.Healthcheck') == {
            "Test": ["CMD-SHELL", "/bin/true"],
            "Interval": nanoseconds_from_time_seconds(1),
            "Timeout": nanoseconds_from_time_seconds(30 * 60),
            "Retries": 1,
        }

        wait_on_health_status(passes_container, 'healthy')

        fails = self.project.get_service('fails')
        fails_container = fails.containers()[0]

        assert fails_container.get('Config.Healthcheck') == {
            "Test": ["CMD", "/bin/false"],
            "Interval": nanoseconds_from_time_seconds(2.5),
            "Retries": 2,
        }

        wait_on_health_status(fails_container, 'unhealthy')

        disabled = self.project.get_service('disabled')
        disabled_container = disabled.containers()[0]

        assert disabled_container.get('Config.Healthcheck') == {
            "Test": ["NONE"],
        }

        assert 'Health' not in disabled_container.get('State')
+
+ def test_up_with_no_deps(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', '--no-deps', 'web'], None)
+ web = self.project.get_service('web')
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ self.assertEqual(len(web.containers()), 1)
+ self.assertEqual(len(db.containers()), 0)
+ self.assertEqual(len(console.containers()), 0)
+
+ def test_up_with_force_recreate(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+
+ old_ids = [c.id for c in service.containers()]
+
+ self.dispatch(['up', '-d', '--force-recreate'], None)
+ self.assertEqual(len(service.containers()), 1)
+
+ new_ids = [c.id for c in service.containers()]
+
+ self.assertNotEqual(old_ids, new_ids)
+
+ def test_up_with_no_recreate(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+
+ old_ids = [c.id for c in service.containers()]
+
+ self.dispatch(['up', '-d', '--no-recreate'], None)
+ self.assertEqual(len(service.containers()), 1)
+
+ new_ids = [c.id for c in service.containers()]
+
+ self.assertEqual(old_ids, new_ids)
+
    def test_up_with_force_recreate_and_no_recreate(self):
        """`up` rejects the mutually exclusive --force-recreate/--no-recreate pair."""
        self.dispatch(
            ['up', '-d', '--force-recreate', '--no-recreate'],
            returncode=1)
+
+ def test_up_with_timeout(self):
+ self.dispatch(['up', '-d', '-t', '1'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertEqual(len(another.containers()), 1)
+
+ # Ensure containers don't have stdin and stdout connected in -d mode
+ config = service.containers()[0].inspect()['Config']
+ self.assertFalse(config['AttachStderr'])
+ self.assertFalse(config['AttachStdout'])
+ self.assertFalse(config['AttachStdin'])
+
    def test_up_handles_sigint(self):
        """SIGINT (Ctrl-C) on an attached `up` stops and removes all containers."""
        proc = start_process(self.base_dir, ['up', '-t', '2'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        os.kill(proc.pid, signal.SIGINT)
        wait_on_condition(ContainerCountCondition(self.project, 0))
+
    def test_up_handles_sigterm(self):
        """SIGTERM on an attached `up` stops and removes all containers."""
        proc = start_process(self.base_dir, ['up', '-t', '2'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        os.kill(proc.pid, signal.SIGTERM)
        wait_on_condition(ContainerCountCondition(self.project, 0))
+
    @v2_only()
    def test_up_handles_force_shutdown(self):
        """A second SIGTERM during shutdown forces an immediate kill,
        even with a very long stop timeout (-t 200)."""
        self.base_dir = 'tests/fixtures/sleeps-composefile'
        proc = start_process(self.base_dir, ['up', '-t', '200'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        os.kill(proc.pid, signal.SIGTERM)
        time.sleep(0.1)
        os.kill(proc.pid, signal.SIGTERM)
        wait_on_condition(ContainerCountCondition(self.project, 0))
+
+ def test_up_handles_abort_on_container_exit(self):
+ self.base_dir = 'tests/fixtures/abort-on-container-exit-0'
+ proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+ proc.wait()
+ self.assertEqual(proc.returncode, 0)
+
+ def test_up_handles_abort_on_container_exit_code(self):
+ self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
+ proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+ proc.wait()
+ self.assertEqual(proc.returncode, 1)
+
    @v2_only()
    @no_cluster('Container PID mode does not work across clusters')
    def test_up_with_pid_mode(self):
        """pid settings (service:, container:, host) map to HostConfig.PidMode."""
        c = self.client.create_container(
            'busybox', 'top', name='composetest_pid_mode_container',
            host_config={}
        )
        self.addCleanup(self.client.remove_container, c, force=True)
        self.client.start(c)
        container_mode_source = 'container:{}'.format(c['Id'])

        self.base_dir = 'tests/fixtures/pid-mode'

        self.dispatch(['up', '-d'], None)

        # `pid: service:container` resolves to that service's container id.
        service_mode_source = 'container:{}'.format(
            self.project.get_service('container').containers()[0].id)
        service_mode_container = self.project.get_service('service').containers()[0]
        assert service_mode_container.get('HostConfig.PidMode') == service_mode_source

        container_mode_container = self.project.get_service('container').containers()[0]
        assert container_mode_container.get('HostConfig.PidMode') == container_mode_source

        host_mode_container = self.project.get_service('host').containers()[0]
        assert host_mode_container.get('HostConfig.PidMode') == 'host'
+
+ def test_exec_without_tty(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'console'])
+ self.assertEqual(len(self.project.containers()), 1)
+
+ stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
+ self.assertEqual(stderr, "")
+ self.assertEqual(stdout, "/\n")
+
+ def test_exec_custom_user(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'console'])
+ self.assertEqual(len(self.project.containers()), 1)
+
+ stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
+ self.assertEqual(stdout, "operator\n")
+ self.assertEqual(stderr, "")
+
+ def test_run_service_without_links(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['run', 'console', '/bin/true'])
+ self.assertEqual(len(self.project.containers()), 0)
+
+ # Ensure stdin/out was open
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ config = container.inspect()['Config']
+ self.assertTrue(config['AttachStderr'])
+ self.assertTrue(config['AttachStdout'])
+ self.assertTrue(config['AttachStdin'])
+
+ def test_run_service_with_links(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ @v2_only()
+ def test_run_service_with_dependencies(self):
+ self.base_dir = 'tests/fixtures/v2-dependencies'
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ def test_run_service_with_scaled_dependencies(self):
+ self.base_dir = 'tests/fixtures/v2-dependencies'
+ self.dispatch(['up', '-d', '--scale', 'db=2', '--scale', 'console=0'])
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ assert len(db.containers()) == 2
+ assert len(console.containers()) == 0
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ assert len(db.containers()) == 2
+ assert len(console.containers()) == 0
+
+ def test_run_with_no_deps(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
+ db = self.project.get_service('db')
+ self.assertEqual(len(db.containers()), 0)
+
+ def test_run_does_not_recreate_linked_containers(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'db'])
+ db = self.project.get_service('db')
+ self.assertEqual(len(db.containers()), 1)
+
+ old_ids = [c.id for c in db.containers()]
+
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ self.assertEqual(len(db.containers()), 1)
+
+ new_ids = [c.id for c in db.containers()]
+
+ self.assertEqual(old_ids, new_ids)
+
+ def test_run_without_command(self):
+ self.base_dir = 'tests/fixtures/commands-composefile'
+ self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
+
+ self.dispatch(['run', 'implicit'])
+ service = self.project.get_service('implicit')
+ containers = service.containers(stopped=True, one_off=OneOffFilter.only)
+ self.assertEqual(
+ [c.human_readable_command for c in containers],
+ [u'/bin/sh -c echo "success"'],
+ )
+
+ self.dispatch(['run', 'explicit'])
+ service = self.project.get_service('explicit')
+ containers = service.containers(stopped=True, one_off=OneOffFilter.only)
+ self.assertEqual(
+ [c.human_readable_command for c in containers],
+ [u'/bin/true'],
+ )
+
    @pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
    def test_run_rm(self):
        """`run --rm` removes the one-off container and its anonymous volume
        on interrupt, while the named volume survives."""
        self.base_dir = 'tests/fixtures/volume'
        proc = start_process(self.base_dir, ['run', '--rm', 'test'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'volume_test_run_1',
            'running'))
        service = self.project.get_service('test')
        containers = service.containers(one_off=OneOffFilter.only)
        self.assertEqual(len(containers), 1)
        # Remember the engine-generated name of the anonymous volume before
        # the container goes away.
        mounts = containers[0].get('Mounts')
        for mount in mounts:
            if mount['Destination'] == '/container-path':
                anonymous_name = mount['Name']
                break
        os.kill(proc.pid, signal.SIGINT)
        wait_on_process(proc, 1)

        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)

        volumes = self.client.volumes()['Volumes']
        assert volumes is not None
        # Find the named volume declared for /container-named-path.
        for volume in service.options.get('volumes'):
            if volume.internal == '/container-named-path':
                name = volume.external
                break
        volume_names = [v['Name'].split('/')[-1] for v in volumes]
        assert name in volume_names
        assert anonymous_name not in volume_names
+
+ def test_run_service_with_dockerfile_entrypoint(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['printf']
+ assert container.get('Config.Cmd') == ['default', 'args']
+
+ def test_run_service_with_dockerfile_entrypoint_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert not container.get('Config.Cmd')
+
+ def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert container.get('Config.Cmd') == ['foo']
+
+ def test_run_service_with_compose_file_entrypoint(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['printf']
+ assert container.get('Config.Cmd') == ['default', 'args']
+
+ def test_run_service_with_compose_file_entrypoint_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert not container.get('Config.Cmd')
+
+ def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert container.get('Config.Cmd') == ['foo']
+
+ def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert container.get('Config.Cmd') == ['']
+
+ def test_run_service_with_user_overridden(self):
+ self.base_dir = 'tests/fixtures/user-composefile'
+ name = 'service'
+ user = 'sshd'
+ self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ self.assertEqual(user, container.get('Config.User'))
+
+ def test_run_service_with_user_overridden_short_form(self):
+ self.base_dir = 'tests/fixtures/user-composefile'
+ name = 'service'
+ user = 'sshd'
+ self.dispatch(['run', '-u', user, name], returncode=1)
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ self.assertEqual(user, container.get('Config.User'))
+
+ def test_run_service_with_environment_overridden(self):
+ name = 'service'
+ self.base_dir = 'tests/fixtures/environment-composefile'
+ self.dispatch([
+ 'run', '-e', 'foo=notbar',
+ '-e', 'allo=moto=bobo',
+ '-e', 'alpha=beta',
+ name,
+ '/bin/true',
+ ])
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ # env overridden
+ self.assertEqual('notbar', container.environment['foo'])
+ # keep environment from yaml
+ self.assertEqual('world', container.environment['hello'])
+ # added option from command line
+ self.assertEqual('beta', container.environment['alpha'])
+ # make sure a value with a = don't crash out
+ self.assertEqual('moto=bobo', container.environment['allo'])
+
+ def test_run_service_without_map_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['run', '-d', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_random = container.get_local_port(3000)
+ port_assigned = container.get_local_port(3001)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ self.assertEqual(port_random, None)
+ self.assertEqual(port_assigned, None)
+
+ def test_run_service_with_map_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['run', '-d', '--service-ports', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_random = container.get_local_port(3000)
+ port_assigned = container.get_local_port(3001)
+ port_range = container.get_local_port(3002), container.get_local_port(3003)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ assert port_random is not None
+ assert port_assigned.endswith(':49152')
+ assert port_range[0].endswith(':49153')
+ assert port_range[1].endswith(':49154')
+
+ def test_run_service_with_explicitly_mapped_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_short = container.get_local_port(3000)
+ port_full = container.get_local_port(3001)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ assert port_short.endswith(':30000')
+ assert port_full.endswith(':30001')
+
+ def test_run_service_with_explicitly_mapped_ip_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch([
+ 'run', '-d',
+ '-p', '127.0.0.1:30000:3000',
+ '--publish', '127.0.0.1:30001:3001',
+ 'simple'
+ ])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_short = container.get_local_port(3000)
+ port_full = container.get_local_port(3001)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ self.assertEqual(port_short, "127.0.0.1:30000")
+ self.assertEqual(port_full, "127.0.0.1:30001")
+
+ def test_run_with_expose_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/expose-composefile'
+ self.dispatch(['run', '-d', '--service-ports', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ ports = container.ports
+ self.assertEqual(len(ports), 9)
+ # exposed ports are not mapped to host ports
+ assert ports['3000/tcp'] is None
+ assert ports['3001/tcp'] is None
+ assert ports['3001/udp'] is None
+ assert ports['3002/tcp'] is None
+ assert ports['3003/tcp'] is None
+ assert ports['3004/tcp'] is None
+ assert ports['3005/tcp'] is None
+ assert ports['3006/udp'] is None
+ assert ports['3007/udp'] is None
+
+ # close all one off containers we just created
+ container.stop()
+
+ def test_run_with_custom_name(self):
+ self.base_dir = 'tests/fixtures/environment-composefile'
+ name = 'the-container-name'
+ self.dispatch(['run', '--name', name, 'service', '/bin/true'])
+
+ service = self.project.get_service('service')
+ container, = service.containers(stopped=True, one_off=OneOffFilter.only)
+ self.assertEqual(container.name, name)
+
+ def test_run_service_with_workdir_overridden(self):
+ self.base_dir = 'tests/fixtures/run-workdir'
+ name = 'service'
+ workdir = '/var'
+ self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name])
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=True)[0]
+ self.assertEqual(workdir, container.get('Config.WorkingDir'))
+
+ def test_run_service_with_workdir_overridden_short_form(self):
+ self.base_dir = 'tests/fixtures/run-workdir'
+ name = 'service'
+ workdir = '/var'
+ self.dispatch(['run', '-w', workdir, name])
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=True)[0]
+ self.assertEqual(workdir, container.get('Config.WorkingDir'))
+
+ @v2_only()
+ def test_run_interactive_connects_to_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+
+ self.dispatch(['up', '-d'])
+ self.dispatch(['run', 'app', 'nslookup', 'app'])
+ self.dispatch(['run', 'app', 'nslookup', 'db'])
+
+ containers = self.project.get_service('app').containers(
+ stopped=True, one_off=OneOffFilter.only)
+ assert len(containers) == 2
+
+ for container in containers:
+ networks = container.get('NetworkSettings.Networks')
+
+ assert sorted(list(networks)) == [
+ '{}_{}'.format(self.project.name, name)
+ for name in ['back', 'front']
+ ]
+
+ for _, config in networks.items():
+ # TODO: once we drop support for API <1.24, this can be changed to:
+ # assert config['Aliases'] == [container.short_id]
+ aliases = set(config['Aliases'] or []) - set([container.short_id])
+ assert not aliases
+
+ @v2_only()
+ def test_run_detached_connects_to_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['up', '-d'])
+ self.dispatch(['run', '-d', 'app', 'top'])
+
+ container = self.project.get_service('app').containers(one_off=OneOffFilter.only)[0]
+ networks = container.get('NetworkSettings.Networks')
+
+ assert sorted(list(networks)) == [
+ '{}_{}'.format(self.project.name, name)
+ for name in ['back', 'front']
+ ]
+
+ for _, config in networks.items():
+ # TODO: once we drop support for API <1.24, this can be changed to:
+ # assert config['Aliases'] == [container.short_id]
+ aliases = set(config['Aliases'] or []) - set([container.short_id])
+ assert not aliases
+
+ assert self.lookup(container, 'app')
+ assert self.lookup(container, 'db')
+
+ def test_run_handles_sigint(self):
+ proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'running'))
+
+ os.kill(proc.pid, signal.SIGINT)
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'exited'))
+
+ def test_run_handles_sigterm(self):
+ proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'running'))
+
+ os.kill(proc.pid, signal.SIGTERM)
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'exited'))
+
+ @mock.patch.dict(os.environ)
+ def test_run_unicode_env_values_from_system(self):
+ value = 'ą, ć, ę, ł, ń, ó, ś, ź, ż'
+ if six.PY2: # os.environ doesn't support unicode values in Py2
+ os.environ['BAR'] = value.encode('utf-8')
+ else: # ... and doesn't support byte values in Py3
+ os.environ['BAR'] = value
+ self.base_dir = 'tests/fixtures/unicode-environment'
+ result = self.dispatch(['run', 'simple'])
+
+ if six.PY2: # Can't retrieve output on Py3. See issue #3670
+ assert value == result.stdout.strip()
+
+ container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ environment = container.get('Config.Env')
+ assert 'FOO={}'.format(value) in environment
+
+ @mock.patch.dict(os.environ)
+ def test_run_env_values_from_system(self):
+ os.environ['FOO'] = 'bar'
+ os.environ['BAR'] = 'baz'
+
+ self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)
+
+ container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ environment = container.get('Config.Env')
+ assert 'FOO=bar' in environment
+ assert 'BAR=baz' not in environment
+
+ def test_rm(self):
+ service = self.project.get_service('simple')
+ service.create_container()
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.dispatch(['rm', '--force'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ service = self.project.get_service('simple')
+ service.create_container()
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.dispatch(['rm', '-f'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ service = self.project.get_service('simple')
+ service.create_container()
+ self.dispatch(['rm', '-fs'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+
+ def test_rm_stop(self):
+ self.dispatch(['up', '-d'], None)
+ simple = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ assert len(simple.containers()) == 1
+ assert len(another.containers()) == 1
+ self.dispatch(['rm', '-fs'], None)
+ assert len(simple.containers(stopped=True)) == 0
+ assert len(another.containers(stopped=True)) == 0
+
+ self.dispatch(['up', '-d'], None)
+ assert len(simple.containers()) == 1
+ assert len(another.containers()) == 1
+ self.dispatch(['rm', '-fs', 'another'], None)
+ assert len(simple.containers()) == 1
+ assert len(another.containers(stopped=True)) == 0
+
+ def test_rm_all(self):
+ service = self.project.get_service('simple')
+ service.create_container(one_off=False)
+ service.create_container(one_off=True)
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+ self.dispatch(['rm', '-f'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
+ service.create_container(one_off=False)
+ service.create_container(one_off=True)
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+ self.dispatch(['rm', '-f', '--all'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
+ def test_stop(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['stop', '-t', '1'], None)
+
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+
+ def test_stop_signal(self):
+ self.base_dir = 'tests/fixtures/stop-signal-composefile'
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['stop', '-t', '1'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+ self.assertEqual(service.containers(stopped=True)[0].exit_code, 0)
+
+ def test_start_no_containers(self):
+ result = self.dispatch(['start'], returncode=1)
+ assert 'No containers to start' in result.stderr
+
+ @v2_only()
+ def test_up_logging(self):
+ self.base_dir = 'tests/fixtures/logging-composefile'
+ self.dispatch(['up', '-d'])
+ simple = self.project.get_service('simple').containers()[0]
+ log_config = simple.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'none')
+
+ another = self.project.get_service('another').containers()[0]
+ log_config = another.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'json-file')
+ self.assertEqual(log_config.get('Config')['max-size'], '10m')
+
+ def test_up_logging_legacy(self):
+ self.base_dir = 'tests/fixtures/logging-composefile-legacy'
+ self.dispatch(['up', '-d'])
+ simple = self.project.get_service('simple').containers()[0]
+ log_config = simple.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'none')
+
+ another = self.project.get_service('another').containers()[0]
+ log_config = another.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'json-file')
+ self.assertEqual(log_config.get('Config')['max-size'], '10m')
+
+ def test_pause_unpause(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertFalse(service.containers()[0].is_paused)
+
+ self.dispatch(['pause'], None)
+ self.assertTrue(service.containers()[0].is_paused)
+
+ self.dispatch(['unpause'], None)
+ self.assertFalse(service.containers()[0].is_paused)
+
+ def test_pause_no_containers(self):
+ result = self.dispatch(['pause'], returncode=1)
+ assert 'No containers to pause' in result.stderr
+
+ def test_unpause_no_containers(self):
+ result = self.dispatch(['unpause'], returncode=1)
+ assert 'No containers to unpause' in result.stderr
+
+ def test_logs_invalid_service_name(self):
+ self.dispatch(['logs', 'madeupname'], returncode=1)
+
+ def test_logs_follow(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ self.dispatch(['up', '-d'])
+
+ result = self.dispatch(['logs', '-f'])
+
+ if not is_cluster(self.client):
+ assert result.stdout.count('\n') == 5
+ else:
+ # Sometimes logs are picked up from old containers that haven't yet
+ # been removed (removal in Swarm is async)
+ assert result.stdout.count('\n') >= 5
+
+ assert 'simple' in result.stdout
+ assert 'another' in result.stdout
+ assert 'exited with code 0' in result.stdout
+
+ def test_logs_follow_logs_from_new_containers(self):
+ self.base_dir = 'tests/fixtures/logs-composefile'
+ self.dispatch(['up', '-d', 'simple'])
+
+ proc = start_process(self.base_dir, ['logs', '-f'])
+
+ self.dispatch(['up', '-d', 'another'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'logscomposefile_another_1',
+ 'exited'))
+
+ self.dispatch(['kill', 'simple'])
+
+ result = wait_on_process(proc)
+
+ assert 'hello' in result.stdout
+ assert 'test' in result.stdout
+ assert 'logscomposefile_another_1 exited with code 0' in result.stdout
+ assert 'logscomposefile_simple_1 exited with code 137' in result.stdout
+
+ def test_logs_default(self):
+ self.base_dir = 'tests/fixtures/logs-composefile'
+ self.dispatch(['up', '-d'])
+
+ result = self.dispatch(['logs'])
+ assert 'hello' in result.stdout
+ assert 'test' in result.stdout
+ assert 'exited with' not in result.stdout
+
+ def test_logs_on_stopped_containers_exits(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ self.dispatch(['up'])
+
+ result = self.dispatch(['logs'])
+ assert 'simple' in result.stdout
+ assert 'another' in result.stdout
+ assert 'exited with' not in result.stdout
+
+ def test_logs_timestamps(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ self.dispatch(['up', '-d'])
+
+ result = self.dispatch(['logs', '-f', '-t'])
+ self.assertRegexpMatches(result.stdout, '(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})')
+
+ def test_logs_tail(self):
+ self.base_dir = 'tests/fixtures/logs-tail-composefile'
+ self.dispatch(['up'])
+
+ result = self.dispatch(['logs', '--tail', '2'])
+ assert 'c\n' in result.stdout
+ assert 'd\n' in result.stdout
+ assert 'a\n' not in result.stdout
+ assert 'b\n' not in result.stdout
+
+ def test_kill(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['kill'], None)
+
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+
+ def test_kill_signal_sigstop(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['kill', '-s', 'SIGSTOP'], None)
+
+ self.assertEqual(len(service.containers()), 1)
+        # The container is still "running" from Docker's point of view:
+        # SIGSTOP merely suspends its processes, it does not stop the container
+ self.assertTrue(service.containers()[0].is_running)
+
+ def test_kill_stopped_service(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.dispatch(['kill', '-s', 'SIGSTOP'], None)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['kill', '-s', 'SIGKILL'], None)
+
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+
+ def test_restart(self):
+ service = self.project.get_service('simple')
+ container = service.create_container()
+ service.start_container(container)
+ started_at = container.dictionary['State']['StartedAt']
+ self.dispatch(['restart', '-t', '1'], None)
+ container.inspect()
+ self.assertNotEqual(
+ container.dictionary['State']['FinishedAt'],
+ '0001-01-01T00:00:00Z',
+ )
+ self.assertNotEqual(
+ container.dictionary['State']['StartedAt'],
+ started_at,
+ )
+
+ def test_restart_stopped_container(self):
+ service = self.project.get_service('simple')
+ container = service.create_container()
+ container.start()
+ container.kill()
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.dispatch(['restart', '-t', '1'], None)
+ self.assertEqual(len(service.containers(stopped=False)), 1)
+
+ def test_restart_no_containers(self):
+ result = self.dispatch(['restart'], returncode=1)
+ assert 'No containers to restart' in result.stderr
+
+ def test_scale(self):
+ project = self.project
+
+ self.dispatch(['scale', 'simple=1'])
+ self.assertEqual(len(project.get_service('simple').containers()), 1)
+
+ self.dispatch(['scale', 'simple=3', 'another=2'])
+ self.assertEqual(len(project.get_service('simple').containers()), 3)
+ self.assertEqual(len(project.get_service('another').containers()), 2)
+
+ self.dispatch(['scale', 'simple=1', 'another=1'])
+ self.assertEqual(len(project.get_service('simple').containers()), 1)
+ self.assertEqual(len(project.get_service('another').containers()), 1)
+
+ self.dispatch(['scale', 'simple=1', 'another=1'])
+ self.assertEqual(len(project.get_service('simple').containers()), 1)
+ self.assertEqual(len(project.get_service('another').containers()), 1)
+
+ self.dispatch(['scale', 'simple=0', 'another=0'])
+ self.assertEqual(len(project.get_service('simple').containers()), 0)
+ self.assertEqual(len(project.get_service('another').containers()), 0)
+
+ def test_scale_v2_2(self):
+ self.base_dir = 'tests/fixtures/scale'
+ result = self.dispatch(['scale', 'web=1'], returncode=1)
+ assert 'incompatible with the v2.2 format' in result.stderr
+
+ def test_up_scale_scale_up(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=3'])
+ assert len(project.get_service('web').containers()) == 3
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_scale_down(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=1'])
+ assert len(project.get_service('web').containers()) == 1
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_reset(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
+ assert len(project.get_service('web').containers()) == 3
+ assert len(project.get_service('db').containers()) == 3
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_to_zero(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
+ assert len(project.get_service('web').containers()) == 0
+ assert len(project.get_service('db').containers()) == 0
+
+ def test_port(self):
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['up', '-d'], None)
+ container = self.project.get_service('simple').get_container()
+
+ def get_port(number):
+ result = self.dispatch(['port', 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ assert get_port(3000) == container.get_local_port(3000)
+ assert ':49152' in get_port(3001)
+ assert ':49153' in get_port(3002)
+
+ def test_expanded_port(self):
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['-f', 'expanded-notation.yml', 'up', '-d'])
+ container = self.project.get_service('simple').get_container()
+
+ def get_port(number):
+ result = self.dispatch(['port', 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ assert get_port(3000) == container.get_local_port(3000)
+ assert ':53222' in get_port(3001)
+ assert ':53223' in get_port(3002)
+
+ def test_port_with_scale(self):
+ self.base_dir = 'tests/fixtures/ports-composefile-scale'
+ self.dispatch(['scale', 'simple=2'], None)
+ containers = sorted(
+ self.project.containers(service_names=['simple']),
+ key=attrgetter('name'))
+
+ def get_port(number, index=None):
+ if index is None:
+ result = self.dispatch(['port', 'simple', str(number)])
+ else:
+ result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
+ self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
+ self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
+ self.assertEqual(get_port(3002), "")
+
+ def test_events_json(self):
+ events_proc = start_process(self.base_dir, ['events', '--json'])
+ self.dispatch(['up', '-d'])
+ wait_on_condition(ContainerCountCondition(self.project, 2))
+
+ os.kill(events_proc.pid, signal.SIGINT)
+ result = wait_on_process(events_proc, returncode=1)
+ lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')]
+ assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2}
+
+ def test_events_human_readable(self):
+
+ def has_timestamp(string):
+ str_iso_date, str_iso_time, container_info = string.split(' ', 2)
+ try:
+ return isinstance(datetime.datetime.strptime(
+ '%s %s' % (str_iso_date, str_iso_time),
+ '%Y-%m-%d %H:%M:%S.%f'),
+ datetime.datetime)
+ except ValueError:
+ return False
+
+ events_proc = start_process(self.base_dir, ['events'])
+ self.dispatch(['up', '-d', 'simple'])
+ wait_on_condition(ContainerCountCondition(self.project, 1))
+
+ os.kill(events_proc.pid, signal.SIGINT)
+ result = wait_on_process(events_proc, returncode=1)
+ lines = result.stdout.rstrip().split('\n')
+ assert len(lines) == 2
+
+ container, = self.project.containers()
+ expected_template = ' container {} {}'
+ expected_meta_info = ['image=busybox:latest', 'name=simplecomposefile_simple_1']
+
+ assert expected_template.format('create', container.id) in lines[0]
+ assert expected_template.format('start', container.id) in lines[1]
+ for line in lines:
+ for info in expected_meta_info:
+ assert info in line
+
+ assert has_timestamp(lines[0])
+
+ def test_env_file_relative_to_compose_file(self):
+ config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
+ self.dispatch(['-f', config_path, 'up', '-d'], None)
+ self._project = get_project(self.base_dir, [config_path])
+
+ containers = self.project.containers(stopped=True)
+ self.assertEqual(len(containers), 1)
+ self.assertIn("FOO=1", containers[0].get('Config.Env'))
+
+ @mock.patch.dict(os.environ)
+ def test_home_and_env_var_in_volume_path(self):
+ os.environ['VOLUME_NAME'] = 'my-volume'
+ os.environ['HOME'] = '/tmp/home-dir'
+
+ self.base_dir = 'tests/fixtures/volume-path-interpolation'
+ self.dispatch(['up', '-d'], None)
+
+ container = self.project.containers(stopped=True)[0]
+ actual_host_path = container.get_mount('/container-path')['Source']
+ components = actual_host_path.split('/')
+ assert components[-2:] == ['home-dir', 'my-volume']
+
+ def test_up_with_default_override_file(self):
+ self.base_dir = 'tests/fixtures/override-files'
+ self.dispatch(['up', '-d'], None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 2)
+
+ web, db = containers
+ self.assertEqual(web.human_readable_command, 'top')
+ self.assertEqual(db.human_readable_command, 'top')
+
+ def test_up_with_multiple_files(self):
+ self.base_dir = 'tests/fixtures/override-files'
+ config_paths = [
+ 'docker-compose.yml',
+ 'docker-compose.override.yml',
+ 'extra.yml',
+ ]
+ self._project = get_project(self.base_dir, config_paths)
+ self.dispatch(
+ [
+ '-f', config_paths[0],
+ '-f', config_paths[1],
+ '-f', config_paths[2],
+ 'up', '-d',
+ ],
+ None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 3)
+
+ web, other, db = containers
+ self.assertEqual(web.human_readable_command, 'top')
+ self.assertEqual(db.human_readable_command, 'top')
+ self.assertEqual(other.human_readable_command, 'top')
+
+ def test_up_with_extends(self):
+ self.base_dir = 'tests/fixtures/extends'
+ self.dispatch(['up', '-d'], None)
+
+ self.assertEqual(
+ set([s.name for s in self.project.services]),
+ set(['mydb', 'myweb']),
+ )
+
+ # Sort by name so we get [db, web]
+ containers = sorted(
+ self.project.containers(stopped=True),
+ key=lambda c: c.name,
+ )
+
+ self.assertEqual(len(containers), 2)
+ web = containers[1]
+
+ self.assertEqual(
+ set(get_links(web)),
+ set(['db', 'mydb_1', 'extends_mydb_1']))
+
+ expected_env = set([
+ "FOO=1",
+ "BAR=2",
+ "BAZ=2",
+ ])
+ self.assertTrue(expected_env <= set(web.get('Config.Env')))
+
+ def test_top_services_not_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ result = self.dispatch(['top'])
+ assert len(result.stdout) == 0
+
+ def test_top_services_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['top'])
+
+ self.assertIn('top_service_a', result.stdout)
+ self.assertIn('top_service_b', result.stdout)
+ self.assertNotIn('top_not_a_service', result.stdout)
+
+ def test_top_processes_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['top'])
+ assert result.stdout.count("top") == 4
+
+ def test_forward_exitval(self):
+ self.base_dir = 'tests/fixtures/exit-code-from'
+ proc = start_process(
+ self.base_dir,
+ ['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
+
+ result = wait_on_process(proc, returncode=1)
+
+ assert 'exitcodefrom_another_1 exited with code 1' in result.stdout
+
+ def test_images(self):
+ self.project.get_service('simple').create_container()
+ result = self.dispatch(['images'])
+ assert 'busybox' in result.stdout
+ assert 'simplecomposefile_simple_1' in result.stdout
+
+ def test_images_default_composefile(self):
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['images'])
+
+ assert 'busybox' in result.stdout
+ assert 'multiplecomposefiles_another_1' in result.stdout
+ assert 'multiplecomposefiles_simple_1' in result.stdout
+
+ def test_up_with_override_yaml(self):
+ self.base_dir = 'tests/fixtures/override-yaml-files'
+ self._project = get_project(self.base_dir, [])
+ self.dispatch(
+ [
+ 'up', '-d',
+ ],
+ None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 2)
+
+ web, db = containers
+ self.assertEqual(web.human_readable_command, 'sleep 100')
+ self.assertEqual(db.human_readable_command, 'top')
+
+ def test_up_with_duplicate_override_yaml_files(self):
+ self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
+ with self.assertRaises(DuplicateOverrideFileFound):
+ get_project(self.base_dir, [])
+ self.base_dir = None
diff --git a/tests/fixtures/UpperCaseDir/docker-compose.yml b/tests/fixtures/UpperCaseDir/docker-compose.yml
new file mode 100644
index 00000000..b25beaf4
--- /dev/null
+++ b/tests/fixtures/UpperCaseDir/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/abort-on-container-exit-0/docker-compose.yml b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
new file mode 100644
index 00000000..ce41697b
--- /dev/null
+++ b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: ls .
diff --git a/tests/fixtures/abort-on-container-exit-1/docker-compose.yml b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
new file mode 100644
index 00000000..7ec9b7e1
--- /dev/null
+++ b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: ls /thecakeisalie
diff --git a/tests/fixtures/build-ctx/Dockerfile b/tests/fixtures/build-ctx/Dockerfile
new file mode 100644
index 00000000..dd864b83
--- /dev/null
+++ b/tests/fixtures/build-ctx/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+CMD echo "success"
diff --git a/tests/fixtures/build-path-override-dir/docker-compose.yml b/tests/fixtures/build-path-override-dir/docker-compose.yml
new file mode 100644
index 00000000..15dbb3e6
--- /dev/null
+++ b/tests/fixtures/build-path-override-dir/docker-compose.yml
@@ -0,0 +1,2 @@
+foo:
+ build: ./build-ctx/
diff --git a/tests/fixtures/build-path/docker-compose.yml b/tests/fixtures/build-path/docker-compose.yml
new file mode 100644
index 00000000..66e8916e
--- /dev/null
+++ b/tests/fixtures/build-path/docker-compose.yml
@@ -0,0 +1,2 @@
+foo:
+ build: ../build-ctx/
diff --git a/tests/fixtures/build-shm-size/Dockerfile b/tests/fixtures/build-shm-size/Dockerfile
new file mode 100644
index 00000000..f91733d6
--- /dev/null
+++ b/tests/fixtures/build-shm-size/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+
+# Report the shm_size (through the size of /dev/shm)
+RUN echo "shm_size:" $(df -h /dev/shm | tail -n 1 | awk '{print $2}')
diff --git a/tests/fixtures/build-shm-size/docker-compose.yml b/tests/fixtures/build-shm-size/docker-compose.yml
new file mode 100644
index 00000000..238a5132
--- /dev/null
+++ b/tests/fixtures/build-shm-size/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '3.5'
+
+services:
+ custom_shm_size:
+ build:
+ context: .
+ shm_size: 100663296 # =96M
diff --git a/tests/fixtures/bundle-with-digests/docker-compose.yml b/tests/fixtures/bundle-with-digests/docker-compose.yml
new file mode 100644
index 00000000..b7013512
--- /dev/null
+++ b/tests/fixtures/bundle-with-digests/docker-compose.yml
@@ -0,0 +1,9 @@
+
+version: '2.0'
+
+services:
+ web:
+ image: dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d
+
+ redis:
+ image: redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d374b2b7392de1e7d77be26ef8f7b
diff --git a/tests/fixtures/commands-composefile/docker-compose.yml b/tests/fixtures/commands-composefile/docker-compose.yml
new file mode 100644
index 00000000..87602bd6
--- /dev/null
+++ b/tests/fixtures/commands-composefile/docker-compose.yml
@@ -0,0 +1,5 @@
+implicit:
+ image: composetest_test
+explicit:
+ image: composetest_test
+ command: [ "/bin/true" ]
diff --git a/tests/fixtures/default-env-file/.env b/tests/fixtures/default-env-file/.env
new file mode 100644
index 00000000..9056de72
--- /dev/null
+++ b/tests/fixtures/default-env-file/.env
@@ -0,0 +1,4 @@
+IMAGE=alpine:latest
+COMMAND=true
+PORT1=5643
+PORT2=9999
diff --git a/tests/fixtures/default-env-file/docker-compose.yml b/tests/fixtures/default-env-file/docker-compose.yml
new file mode 100644
index 00000000..aa8e4409
--- /dev/null
+++ b/tests/fixtures/default-env-file/docker-compose.yml
@@ -0,0 +1,6 @@
+web:
+ image: ${IMAGE}
+ command: ${COMMAND}
+ ports:
+ - $PORT1
+ - $PORT2
diff --git a/tests/fixtures/dockerfile-with-volume/Dockerfile b/tests/fixtures/dockerfile-with-volume/Dockerfile
new file mode 100644
index 00000000..0d376ec4
--- /dev/null
+++ b/tests/fixtures/dockerfile-with-volume/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+VOLUME /data
+CMD top
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml
new file mode 100644
index 00000000..58c67348
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml
@@ -0,0 +1,3 @@
+
+db:
+ command: "top"
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml
new file mode 100644
index 00000000..f1b8ef18
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml
@@ -0,0 +1,3 @@
+
+db:
+ command: "sleep 300"
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
new file mode 100644
index 00000000..5f2909d6
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
@@ -0,0 +1,10 @@
+
+web:
+ image: busybox:latest
+ command: "sleep 100"
+ links:
+ - db
+
+db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/echo-services/docker-compose.yml b/tests/fixtures/echo-services/docker-compose.yml
new file mode 100644
index 00000000..8014f3d9
--- /dev/null
+++ b/tests/fixtures/echo-services/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: echo simple
+another:
+ image: busybox:latest
+ command: echo another
diff --git a/tests/fixtures/entrypoint-composefile/docker-compose.yml b/tests/fixtures/entrypoint-composefile/docker-compose.yml
new file mode 100644
index 00000000..e9880973
--- /dev/null
+++ b/tests/fixtures/entrypoint-composefile/docker-compose.yml
@@ -0,0 +1,6 @@
+version: "2"
+services:
+ test:
+ image: busybox
+ entrypoint: printf
+ command: default args
diff --git a/tests/fixtures/entrypoint-dockerfile/Dockerfile b/tests/fixtures/entrypoint-dockerfile/Dockerfile
new file mode 100644
index 00000000..49f4416c
--- /dev/null
+++ b/tests/fixtures/entrypoint-dockerfile/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+ENTRYPOINT ["printf"]
+CMD ["default", "args"]
diff --git a/tests/fixtures/entrypoint-dockerfile/docker-compose.yml b/tests/fixtures/entrypoint-dockerfile/docker-compose.yml
new file mode 100644
index 00000000..8318e61f
--- /dev/null
+++ b/tests/fixtures/entrypoint-dockerfile/docker-compose.yml
@@ -0,0 +1,4 @@
+version: "2"
+services:
+ test:
+ build: .
diff --git a/tests/fixtures/env-file/docker-compose.yml b/tests/fixtures/env-file/docker-compose.yml
new file mode 100644
index 00000000..d9366ace
--- /dev/null
+++ b/tests/fixtures/env-file/docker-compose.yml
@@ -0,0 +1,4 @@
+web:
+ image: busybox
+ command: /bin/true
+ env_file: ./test.env
diff --git a/tests/fixtures/env-file/test.env b/tests/fixtures/env-file/test.env
new file mode 100644
index 00000000..d99cd41a
--- /dev/null
+++ b/tests/fixtures/env-file/test.env
@@ -0,0 +1 @@
+FOO=1
diff --git a/tests/fixtures/env/one.env b/tests/fixtures/env/one.env
new file mode 100644
index 00000000..45b59fe6
--- /dev/null
+++ b/tests/fixtures/env/one.env
@@ -0,0 +1,11 @@
+# Keep the blank lines and comments in this file, please
+
+ONE=2
+TWO=1
+
+ # (thanks)
+
+THREE=3
+
+FOO=bar
+# FOO=somethingelse
diff --git a/tests/fixtures/env/resolve.env b/tests/fixtures/env/resolve.env
new file mode 100644
index 00000000..b4f76b29
--- /dev/null
+++ b/tests/fixtures/env/resolve.env
@@ -0,0 +1,4 @@
+FILE_DEF=bär
+FILE_DEF_EMPTY=
+ENV_DEF
+NO_DEF
diff --git a/tests/fixtures/env/two.env b/tests/fixtures/env/two.env
new file mode 100644
index 00000000..3b21871a
--- /dev/null
+++ b/tests/fixtures/env/two.env
@@ -0,0 +1,2 @@
+FOO=baz
+DOO=dah
diff --git a/tests/fixtures/environment-composefile/docker-compose.yml b/tests/fixtures/environment-composefile/docker-compose.yml
new file mode 100644
index 00000000..9d99fee0
--- /dev/null
+++ b/tests/fixtures/environment-composefile/docker-compose.yml
@@ -0,0 +1,7 @@
+service:
+ image: busybox:latest
+ command: top
+
+ environment:
+ foo: bar
+ hello: world
diff --git a/tests/fixtures/environment-interpolation/docker-compose.yml b/tests/fixtures/environment-interpolation/docker-compose.yml
new file mode 100644
index 00000000..7ed43a81
--- /dev/null
+++ b/tests/fixtures/environment-interpolation/docker-compose.yml
@@ -0,0 +1,17 @@
+web:
+ # unbracketed name
+ image: $IMAGE
+
+ # array element
+ ports:
+ - "${HOST_PORT}:8000"
+
+ # dictionary item value
+ labels:
+ mylabel: "${LABEL_VALUE}"
+
+ # unset value
+ hostname: "host-${UNSET_VALUE}"
+
+ # escaped interpolation
+ command: "$${ESCAPED}"
diff --git a/tests/fixtures/exit-code-from/docker-compose.yml b/tests/fixtures/exit-code-from/docker-compose.yml
new file mode 100644
index 00000000..687e78b9
--- /dev/null
+++ b/tests/fixtures/exit-code-from/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo hello && tail -f /dev/null"
+another:
+ image: busybox:latest
+ command: /bin/false
diff --git a/tests/fixtures/expose-composefile/docker-compose.yml b/tests/fixtures/expose-composefile/docker-compose.yml
new file mode 100644
index 00000000..d14a468d
--- /dev/null
+++ b/tests/fixtures/expose-composefile/docker-compose.yml
@@ -0,0 +1,11 @@
+
+simple:
+ image: busybox:latest
+ command: top
+ expose:
+ - '3000'
+ - '3001/tcp'
+ - '3001/udp'
+ - '3002-3003'
+ - '3004-3005/tcp'
+ - '3006-3007/udp'
diff --git a/tests/fixtures/extends/circle-1.yml b/tests/fixtures/extends/circle-1.yml
new file mode 100644
index 00000000..d88ea61d
--- /dev/null
+++ b/tests/fixtures/extends/circle-1.yml
@@ -0,0 +1,12 @@
+foo:
+ image: busybox
+bar:
+ image: busybox
+web:
+ extends:
+ file: circle-2.yml
+ service: other
+baz:
+ image: busybox
+quux:
+ image: busybox
diff --git a/tests/fixtures/extends/circle-2.yml b/tests/fixtures/extends/circle-2.yml
new file mode 100644
index 00000000..de05bc8d
--- /dev/null
+++ b/tests/fixtures/extends/circle-2.yml
@@ -0,0 +1,12 @@
+foo:
+ image: busybox
+bar:
+ image: busybox
+other:
+ extends:
+ file: circle-1.yml
+ service: web
+baz:
+ image: busybox
+quux:
+ image: busybox
diff --git a/tests/fixtures/extends/common-env-labels-ulimits.yml b/tests/fixtures/extends/common-env-labels-ulimits.yml
new file mode 100644
index 00000000..09efb4e7
--- /dev/null
+++ b/tests/fixtures/extends/common-env-labels-ulimits.yml
@@ -0,0 +1,13 @@
+web:
+ extends:
+ file: common.yml
+ service: web
+ environment:
+ - FOO=2
+ - BAZ=3
+ labels: ['label=one']
+ ulimits:
+ nproc: 65535
+ memlock:
+ soft: 1024
+ hard: 2048
diff --git a/tests/fixtures/extends/common.yml b/tests/fixtures/extends/common.yml
new file mode 100644
index 00000000..b2d86aa4
--- /dev/null
+++ b/tests/fixtures/extends/common.yml
@@ -0,0 +1,7 @@
+web:
+ image: busybox
+ command: /bin/true
+ net: host
+ environment:
+ - FOO=1
+ - BAR=1
diff --git a/tests/fixtures/extends/docker-compose.yml b/tests/fixtures/extends/docker-compose.yml
new file mode 100644
index 00000000..8e37d404
--- /dev/null
+++ b/tests/fixtures/extends/docker-compose.yml
@@ -0,0 +1,17 @@
+myweb:
+ extends:
+ file: common.yml
+ service: web
+ command: top
+ links:
+ - "mydb:db"
+ environment:
+ # leave FOO alone
+ # override BAR
+ BAR: "2"
+ # add BAZ
+ BAZ: "2"
+ net: bridge
+mydb:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/extends/healthcheck-1.yml b/tests/fixtures/extends/healthcheck-1.yml
new file mode 100644
index 00000000..4c311e62
--- /dev/null
+++ b/tests/fixtures/extends/healthcheck-1.yml
@@ -0,0 +1,9 @@
+version: '2.1'
+services:
+ demo:
+ image: foobar:latest
+ healthcheck:
+ test: ["CMD", "/health.sh"]
+ interval: 10s
+ timeout: 5s
+ retries: 36
diff --git a/tests/fixtures/extends/healthcheck-2.yml b/tests/fixtures/extends/healthcheck-2.yml
new file mode 100644
index 00000000..11bc9f09
--- /dev/null
+++ b/tests/fixtures/extends/healthcheck-2.yml
@@ -0,0 +1,6 @@
+version: '2.1'
+services:
+ demo:
+ extends:
+ file: healthcheck-1.yml
+ service: demo
diff --git a/tests/fixtures/extends/invalid-links.yml b/tests/fixtures/extends/invalid-links.yml
new file mode 100644
index 00000000..cea740cb
--- /dev/null
+++ b/tests/fixtures/extends/invalid-links.yml
@@ -0,0 +1,11 @@
+mydb:
+ build: '.'
+myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+web:
+ build: '.'
+ links:
+ - "mydb:db"
diff --git a/tests/fixtures/extends/invalid-net-v2.yml b/tests/fixtures/extends/invalid-net-v2.yml
new file mode 100644
index 00000000..7ba714e8
--- /dev/null
+++ b/tests/fixtures/extends/invalid-net-v2.yml
@@ -0,0 +1,12 @@
+version: "2"
+services:
+ myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+ web:
+ build: '.'
+ network_mode: "service:net"
+ net:
+ build: '.'
diff --git a/tests/fixtures/extends/invalid-net.yml b/tests/fixtures/extends/invalid-net.yml
new file mode 100644
index 00000000..fbcd020b
--- /dev/null
+++ b/tests/fixtures/extends/invalid-net.yml
@@ -0,0 +1,8 @@
+myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+web:
+ build: '.'
+ net: "container:db"
diff --git a/tests/fixtures/extends/invalid-volumes.yml b/tests/fixtures/extends/invalid-volumes.yml
new file mode 100644
index 00000000..3db0118e
--- /dev/null
+++ b/tests/fixtures/extends/invalid-volumes.yml
@@ -0,0 +1,9 @@
+myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+web:
+ build: '.'
+ volumes_from:
+ - "db"
diff --git a/tests/fixtures/extends/nested-intermediate.yml b/tests/fixtures/extends/nested-intermediate.yml
new file mode 100644
index 00000000..c2dd8c94
--- /dev/null
+++ b/tests/fixtures/extends/nested-intermediate.yml
@@ -0,0 +1,6 @@
+webintermediate:
+ extends:
+ file: common.yml
+ service: web
+ environment:
+ - "FOO=2"
diff --git a/tests/fixtures/extends/nested.yml b/tests/fixtures/extends/nested.yml
new file mode 100644
index 00000000..6025e6d5
--- /dev/null
+++ b/tests/fixtures/extends/nested.yml
@@ -0,0 +1,6 @@
+myweb:
+ extends:
+ file: nested-intermediate.yml
+ service: webintermediate
+ environment:
+ - "BAR=2"
diff --git a/tests/fixtures/extends/no-file-specified.yml b/tests/fixtures/extends/no-file-specified.yml
new file mode 100644
index 00000000..40e43c4b
--- /dev/null
+++ b/tests/fixtures/extends/no-file-specified.yml
@@ -0,0 +1,9 @@
+myweb:
+ extends:
+ service: web
+ environment:
+ - "BAR=1"
+web:
+ image: busybox
+ environment:
+ - "BAZ=3"
diff --git a/tests/fixtures/extends/nonexistent-path-base.yml b/tests/fixtures/extends/nonexistent-path-base.yml
new file mode 100644
index 00000000..4e6c82b0
--- /dev/null
+++ b/tests/fixtures/extends/nonexistent-path-base.yml
@@ -0,0 +1,6 @@
+dnebase:
+ build: nonexistent.path
+ command: /bin/true
+ environment:
+ - FOO=1
+ - BAR=1
diff --git a/tests/fixtures/extends/nonexistent-path-child.yml b/tests/fixtures/extends/nonexistent-path-child.yml
new file mode 100644
index 00000000..d3b732f2
--- /dev/null
+++ b/tests/fixtures/extends/nonexistent-path-child.yml
@@ -0,0 +1,8 @@
+dnechild:
+ extends:
+ file: nonexistent-path-base.yml
+ service: dnebase
+ image: busybox
+ command: /bin/true
+ environment:
+ - BAR=2
diff --git a/tests/fixtures/extends/nonexistent-service.yml b/tests/fixtures/extends/nonexistent-service.yml
new file mode 100644
index 00000000..e9e17f1b
--- /dev/null
+++ b/tests/fixtures/extends/nonexistent-service.yml
@@ -0,0 +1,4 @@
+web:
+ image: busybox
+ extends:
+ service: foo
diff --git a/tests/fixtures/extends/service-with-invalid-schema.yml b/tests/fixtures/extends/service-with-invalid-schema.yml
new file mode 100644
index 00000000..00c36647
--- /dev/null
+++ b/tests/fixtures/extends/service-with-invalid-schema.yml
@@ -0,0 +1,4 @@
+myweb:
+ extends:
+ file: valid-composite-extends.yml
+ service: web
diff --git a/tests/fixtures/extends/service-with-valid-composite-extends.yml b/tests/fixtures/extends/service-with-valid-composite-extends.yml
new file mode 100644
index 00000000..6c419ed0
--- /dev/null
+++ b/tests/fixtures/extends/service-with-valid-composite-extends.yml
@@ -0,0 +1,5 @@
+myweb:
+ build: '.'
+ extends:
+ file: 'valid-composite-extends.yml'
+ service: web
diff --git a/tests/fixtures/extends/specify-file-as-self.yml b/tests/fixtures/extends/specify-file-as-self.yml
new file mode 100644
index 00000000..c24f10bc
--- /dev/null
+++ b/tests/fixtures/extends/specify-file-as-self.yml
@@ -0,0 +1,17 @@
+myweb:
+ extends:
+ file: specify-file-as-self.yml
+ service: web
+ environment:
+ - "BAR=1"
+web:
+ extends:
+ file: specify-file-as-self.yml
+ service: otherweb
+ image: busybox
+ environment:
+ - "BAZ=3"
+otherweb:
+ image: busybox
+ environment:
+ - "YEP=1"
diff --git a/tests/fixtures/extends/valid-common-config.yml b/tests/fixtures/extends/valid-common-config.yml
new file mode 100644
index 00000000..d8f13e7a
--- /dev/null
+++ b/tests/fixtures/extends/valid-common-config.yml
@@ -0,0 +1,6 @@
+myweb:
+ build: '.'
+ extends:
+ file: valid-common.yml
+ service: common-config
+ command: top
diff --git a/tests/fixtures/extends/valid-common.yml b/tests/fixtures/extends/valid-common.yml
new file mode 100644
index 00000000..07ad68e3
--- /dev/null
+++ b/tests/fixtures/extends/valid-common.yml
@@ -0,0 +1,3 @@
+common-config:
+ environment:
+ - FOO=1
diff --git a/tests/fixtures/extends/valid-composite-extends.yml b/tests/fixtures/extends/valid-composite-extends.yml
new file mode 100644
index 00000000..8816c3f3
--- /dev/null
+++ b/tests/fixtures/extends/valid-composite-extends.yml
@@ -0,0 +1,2 @@
+web:
+ command: top
diff --git a/tests/fixtures/extends/valid-interpolation-2.yml b/tests/fixtures/extends/valid-interpolation-2.yml
new file mode 100644
index 00000000..cb7bd93f
--- /dev/null
+++ b/tests/fixtures/extends/valid-interpolation-2.yml
@@ -0,0 +1,3 @@
+web:
+ build: '.'
+ hostname: "host-${HOSTNAME_VALUE}"
diff --git a/tests/fixtures/extends/valid-interpolation.yml b/tests/fixtures/extends/valid-interpolation.yml
new file mode 100644
index 00000000..68e8740f
--- /dev/null
+++ b/tests/fixtures/extends/valid-interpolation.yml
@@ -0,0 +1,5 @@
+myweb:
+ extends:
+ service: web
+ file: valid-interpolation-2.yml
+ command: top
diff --git a/tests/fixtures/extends/verbose-and-shorthand.yml b/tests/fixtures/extends/verbose-and-shorthand.yml
new file mode 100644
index 00000000..d3816302
--- /dev/null
+++ b/tests/fixtures/extends/verbose-and-shorthand.yml
@@ -0,0 +1,15 @@
+base:
+ image: busybox
+ environment:
+ - "BAR=1"
+
+verbose:
+ extends:
+ service: base
+ environment:
+ - "FOO=1"
+
+shorthand:
+ extends: base
+ environment:
+ - "FOO=2"
diff --git a/tests/fixtures/healthcheck/docker-compose.yml b/tests/fixtures/healthcheck/docker-compose.yml
new file mode 100644
index 00000000..2c45b8d8
--- /dev/null
+++ b/tests/fixtures/healthcheck/docker-compose.yml
@@ -0,0 +1,24 @@
+version: "3"
+services:
+ passes:
+ image: busybox
+ command: top
+ healthcheck:
+ test: "/bin/true"
+ interval: 1s
+ timeout: 30m
+ retries: 1
+
+ fails:
+ image: busybox
+ command: top
+ healthcheck:
+ test: ["CMD", "/bin/false"]
+ interval: 2.5s
+ retries: 2
+
+ disabled:
+ image: busybox
+ command: top
+ healthcheck:
+ disable: true
diff --git a/tests/fixtures/invalid-composefile/invalid.yml b/tests/fixtures/invalid-composefile/invalid.yml
new file mode 100644
index 00000000..0e74be44
--- /dev/null
+++ b/tests/fixtures/invalid-composefile/invalid.yml
@@ -0,0 +1,5 @@
+
+notaservice: oops
+
+web:
+ image: 'alpine:edge'
diff --git a/tests/fixtures/links-composefile/docker-compose.yml b/tests/fixtures/links-composefile/docker-compose.yml
new file mode 100644
index 00000000..930fd4c7
--- /dev/null
+++ b/tests/fixtures/links-composefile/docker-compose.yml
@@ -0,0 +1,11 @@
+db:
+ image: busybox:latest
+ command: top
+web:
+ image: busybox:latest
+ command: top
+ links:
+ - db:db
+console:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/logging-composefile-legacy/docker-compose.yml b/tests/fixtures/logging-composefile-legacy/docker-compose.yml
new file mode 100644
index 00000000..ee994107
--- /dev/null
+++ b/tests/fixtures/logging-composefile-legacy/docker-compose.yml
@@ -0,0 +1,10 @@
+simple:
+ image: busybox:latest
+ command: top
+ log_driver: "none"
+another:
+ image: busybox:latest
+ command: top
+ log_driver: "json-file"
+ log_opt:
+ max-size: "10m"
diff --git a/tests/fixtures/logging-composefile/docker-compose.yml b/tests/fixtures/logging-composefile/docker-compose.yml
new file mode 100644
index 00000000..466d13e5
--- /dev/null
+++ b/tests/fixtures/logging-composefile/docker-compose.yml
@@ -0,0 +1,14 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ logging:
+ driver: "none"
+ another:
+ image: busybox:latest
+ command: top
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
diff --git a/tests/fixtures/logs-composefile/docker-compose.yml b/tests/fixtures/logs-composefile/docker-compose.yml
new file mode 100644
index 00000000..b719c91e
--- /dev/null
+++ b/tests/fixtures/logs-composefile/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo hello && tail -f /dev/null"
+another:
+ image: busybox:latest
+ command: sh -c "echo test"
diff --git a/tests/fixtures/logs-tail-composefile/docker-compose.yml b/tests/fixtures/logs-tail-composefile/docker-compose.yml
new file mode 100644
index 00000000..80d8feae
--- /dev/null
+++ b/tests/fixtures/logs-tail-composefile/docker-compose.yml
@@ -0,0 +1,3 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo a && echo b && echo c && echo d"
diff --git a/tests/fixtures/longer-filename-composefile/docker-compose.yaml b/tests/fixtures/longer-filename-composefile/docker-compose.yaml
new file mode 100644
index 00000000..a4eba2d0
--- /dev/null
+++ b/tests/fixtures/longer-filename-composefile/docker-compose.yaml
@@ -0,0 +1,3 @@
+definedinyamlnotyml:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/multiple-composefiles/compose2.yml b/tests/fixtures/multiple-composefiles/compose2.yml
new file mode 100644
index 00000000..56803380
--- /dev/null
+++ b/tests/fixtures/multiple-composefiles/compose2.yml
@@ -0,0 +1,3 @@
+yetanother:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/multiple-composefiles/docker-compose.yml b/tests/fixtures/multiple-composefiles/docker-compose.yml
new file mode 100644
index 00000000..b25beaf4
--- /dev/null
+++ b/tests/fixtures/multiple-composefiles/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/net-container/docker-compose.yml b/tests/fixtures/net-container/docker-compose.yml
new file mode 100644
index 00000000..b5506e0e
--- /dev/null
+++ b/tests/fixtures/net-container/docker-compose.yml
@@ -0,0 +1,7 @@
+foo:
+ image: busybox
+ command: top
+ net: "container:bar"
+bar:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/net-container/v2-invalid.yml b/tests/fixtures/net-container/v2-invalid.yml
new file mode 100644
index 00000000..9b846295
--- /dev/null
+++ b/tests/fixtures/net-container/v2-invalid.yml
@@ -0,0 +1,10 @@
+version: "2"
+
+services:
+ foo:
+ image: busybox
+ command: top
+ bar:
+ image: busybox
+ command: top
+ net: "container:foo"
diff --git a/tests/fixtures/networks/bridge.yml b/tests/fixtures/networks/bridge.yml
new file mode 100644
index 00000000..9fa7db82
--- /dev/null
+++ b/tests/fixtures/networks/bridge.yml
@@ -0,0 +1,9 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - bridge
+ - default
diff --git a/tests/fixtures/networks/default-network-config.yml b/tests/fixtures/networks/default-network-config.yml
new file mode 100644
index 00000000..4bd0989b
--- /dev/null
+++ b/tests/fixtures/networks/default-network-config.yml
@@ -0,0 +1,13 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ another:
+ image: busybox:latest
+ command: top
+networks:
+ default:
+ driver: bridge
+ driver_opts:
+ "com.docker.network.bridge.enable_icc": "false"
diff --git a/tests/fixtures/networks/docker-compose.yml b/tests/fixtures/networks/docker-compose.yml
new file mode 100644
index 00000000..c11fa682
--- /dev/null
+++ b/tests/fixtures/networks/docker-compose.yml
@@ -0,0 +1,21 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks: ["front"]
+ app:
+ image: busybox
+ command: top
+ networks: ["front", "back"]
+ links:
+ - "db:database"
+ db:
+ image: busybox
+ command: top
+ networks: ["back"]
+
+networks:
+ front: {}
+ back: {}
diff --git a/tests/fixtures/networks/external-default.yml b/tests/fixtures/networks/external-default.yml
new file mode 100644
index 00000000..5c9426b8
--- /dev/null
+++ b/tests/fixtures/networks/external-default.yml
@@ -0,0 +1,12 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ another:
+ image: busybox:latest
+ command: top
+networks:
+ default:
+ external:
+ name: composetest_external_network
diff --git a/tests/fixtures/networks/external-networks.yml b/tests/fixtures/networks/external-networks.yml
new file mode 100644
index 00000000..db75b780
--- /dev/null
+++ b/tests/fixtures/networks/external-networks.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - networks_foo
+ - bar
+
+networks:
+ networks_foo:
+ external: true
+ bar:
+ external:
+ name: networks_bar
diff --git a/tests/fixtures/networks/missing-network.yml b/tests/fixtures/networks/missing-network.yml
new file mode 100644
index 00000000..41012535
--- /dev/null
+++ b/tests/fixtures/networks/missing-network.yml
@@ -0,0 +1,10 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks: ["foo"]
+
+networks:
+ bar: {}
diff --git a/tests/fixtures/networks/network-aliases.yml b/tests/fixtures/networks/network-aliases.yml
new file mode 100644
index 00000000..8cf7d5af
--- /dev/null
+++ b/tests/fixtures/networks/network-aliases.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ front:
+ aliases:
+ - forward_facing
+ - ahead
+ back:
+
+networks:
+ front: {}
+ back: {}
diff --git a/tests/fixtures/networks/network-internal.yml b/tests/fixtures/networks/network-internal.yml
new file mode 100755
index 00000000..1fa339b1
--- /dev/null
+++ b/tests/fixtures/networks/network-internal.yml
@@ -0,0 +1,13 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - internal
+
+networks:
+ internal:
+ driver: bridge
+ internal: True
diff --git a/tests/fixtures/networks/network-label.yml b/tests/fixtures/networks/network-label.yml
new file mode 100644
index 00000000..fdb24f65
--- /dev/null
+++ b/tests/fixtures/networks/network-label.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - network_with_label
+
+networks:
+ network_with_label:
+ labels:
+ - "label_key=label_val"
diff --git a/tests/fixtures/networks/network-mode.yml b/tests/fixtures/networks/network-mode.yml
new file mode 100644
index 00000000..e4d070b4
--- /dev/null
+++ b/tests/fixtures/networks/network-mode.yml
@@ -0,0 +1,27 @@
+version: "2"
+
+services:
+ bridge:
+ image: busybox
+ command: top
+ network_mode: bridge
+
+ service:
+ image: busybox
+ command: top
+ network_mode: "service:bridge"
+
+ container:
+ image: busybox
+ command: top
+ network_mode: "container:composetest_network_mode_container"
+
+ host:
+ image: busybox
+ command: top
+ network_mode: host
+
+ none:
+ image: busybox
+ command: top
+ network_mode: none
diff --git a/tests/fixtures/networks/network-static-addresses.yml b/tests/fixtures/networks/network-static-addresses.yml
new file mode 100755
index 00000000..f820ff6a
--- /dev/null
+++ b/tests/fixtures/networks/network-static-addresses.yml
@@ -0,0 +1,23 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ static_test:
+ ipv4_address: 172.16.100.100
+ ipv6_address: fe80::1001:100
+
+networks:
+ static_test:
+ driver: bridge
+ driver_opts:
+ com.docker.network.enable_ipv6: "true"
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.16.100.0/24
+ gateway: 172.16.100.1
+ - subnet: fe80::/64
+ gateway: fe80::1001:1
diff --git a/tests/fixtures/no-composefile/.gitignore b/tests/fixtures/no-composefile/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/no-composefile/.gitignore
diff --git a/tests/fixtures/no-links-composefile/docker-compose.yml b/tests/fixtures/no-links-composefile/docker-compose.yml
new file mode 100644
index 00000000..75a6a085
--- /dev/null
+++ b/tests/fixtures/no-links-composefile/docker-compose.yml
@@ -0,0 +1,9 @@
+db:
+ image: busybox:latest
+ command: top
+web:
+ image: busybox:latest
+ command: top
+console:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/no-services/docker-compose.yml b/tests/fixtures/no-services/docker-compose.yml
new file mode 100644
index 00000000..6e76ec0c
--- /dev/null
+++ b/tests/fixtures/no-services/docker-compose.yml
@@ -0,0 +1,5 @@
+version: "2"
+
+networks:
+ foo: {}
+ bar: {}
diff --git a/tests/fixtures/override-files/docker-compose.override.yml b/tests/fixtures/override-files/docker-compose.override.yml
new file mode 100644
index 00000000..b2c54060
--- /dev/null
+++ b/tests/fixtures/override-files/docker-compose.override.yml
@@ -0,0 +1,7 @@
+version: '2.2'
+services:
+ web:
+ command: "top"
+
+ db:
+ command: "top"
diff --git a/tests/fixtures/override-files/docker-compose.yml b/tests/fixtures/override-files/docker-compose.yml
new file mode 100644
index 00000000..6c3d4e17
--- /dev/null
+++ b/tests/fixtures/override-files/docker-compose.yml
@@ -0,0 +1,10 @@
+version: '2.2'
+services:
+ web:
+ image: busybox:latest
+ command: "sleep 200"
+ depends_on:
+ - db
+ db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/override-files/extra.yml b/tests/fixtures/override-files/extra.yml
new file mode 100644
index 00000000..492c3795
--- /dev/null
+++ b/tests/fixtures/override-files/extra.yml
@@ -0,0 +1,10 @@
+version: '2.2'
+services:
+ web:
+ depends_on:
+ - db
+ - other
+
+ other:
+ image: busybox:latest
+ command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.override.yaml b/tests/fixtures/override-yaml-files/docker-compose.override.yaml
new file mode 100644
index 00000000..58c67348
--- /dev/null
+++ b/tests/fixtures/override-yaml-files/docker-compose.override.yaml
@@ -0,0 +1,3 @@
+
+db:
+ command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.yml b/tests/fixtures/override-yaml-files/docker-compose.yml
new file mode 100644
index 00000000..5f2909d6
--- /dev/null
+++ b/tests/fixtures/override-yaml-files/docker-compose.yml
@@ -0,0 +1,10 @@
+
+web:
+ image: busybox:latest
+ command: "sleep 100"
+ links:
+ - db
+
+db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/pid-mode/docker-compose.yml b/tests/fixtures/pid-mode/docker-compose.yml
new file mode 100644
index 00000000..fece5a9f
--- /dev/null
+++ b/tests/fixtures/pid-mode/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "2.2"
+
+services:
+ service:
+ image: busybox
+ command: top
+ pid: "service:container"
+
+ container:
+ image: busybox
+ command: top
+ pid: "container:composetest_pid_mode_container"
+
+ host:
+ image: busybox
+ command: top
+ pid: host
diff --git a/tests/fixtures/ports-composefile-scale/docker-compose.yml b/tests/fixtures/ports-composefile-scale/docker-compose.yml
new file mode 100644
index 00000000..1a2bb485
--- /dev/null
+++ b/tests/fixtures/ports-composefile-scale/docker-compose.yml
@@ -0,0 +1,6 @@
+
+simple:
+ image: busybox:latest
+ command: /bin/sleep 300
+ ports:
+ - '3000'
diff --git a/tests/fixtures/ports-composefile/docker-compose.yml b/tests/fixtures/ports-composefile/docker-compose.yml
new file mode 100644
index 00000000..c213068d
--- /dev/null
+++ b/tests/fixtures/ports-composefile/docker-compose.yml
@@ -0,0 +1,8 @@
+
+simple:
+ image: busybox:latest
+ command: top
+ ports:
+ - '3000'
+ - '49152:3001'
+ - '49153-49154:3002-3003'
diff --git a/tests/fixtures/ports-composefile/expanded-notation.yml b/tests/fixtures/ports-composefile/expanded-notation.yml
new file mode 100644
index 00000000..09a7a2bf
--- /dev/null
+++ b/tests/fixtures/ports-composefile/expanded-notation.yml
@@ -0,0 +1,15 @@
+version: '3.2'
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ ports:
+ - target: 3000
+ - target: 3001
+ published: 53222
+ - target: 3002
+ published: 53223
+ protocol: tcp
+ - target: 3003
+ published: 53224
+ protocol: udp
diff --git a/tests/fixtures/restart/docker-compose.yml b/tests/fixtures/restart/docker-compose.yml
new file mode 100644
index 00000000..ecfdfbf5
--- /dev/null
+++ b/tests/fixtures/restart/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "2"
+services:
+ never:
+ image: busybox
+ restart: "no"
+ always:
+ image: busybox
+ restart: always
+ on-failure:
+ image: busybox
+ restart: on-failure
+ on-failure-5:
+ image: busybox
+ restart: "on-failure:5"
+ restart-null:
+ image: busybox
+ restart: ""
diff --git a/tests/fixtures/run-workdir/docker-compose.yml b/tests/fixtures/run-workdir/docker-compose.yml
new file mode 100644
index 00000000..dc3ea86a
--- /dev/null
+++ b/tests/fixtures/run-workdir/docker-compose.yml
@@ -0,0 +1,4 @@
+service:
+ image: busybox:latest
+ working_dir: /etc
+ command: /bin/true
diff --git a/tests/fixtures/scale/docker-compose.yml b/tests/fixtures/scale/docker-compose.yml
new file mode 100644
index 00000000..a0d3b771
--- /dev/null
+++ b/tests/fixtures/scale/docker-compose.yml
@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+ web:
+ image: busybox
+ command: top
+ scale: 2
+ db:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/secrets/default b/tests/fixtures/secrets/default
new file mode 100644
index 00000000..f9dc2014
--- /dev/null
+++ b/tests/fixtures/secrets/default
@@ -0,0 +1 @@
+This is the secret
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
new file mode 100644
index 00000000..fe717151
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+ simple:
+ image: busybox:latest
+ volumes:
+ - datastore:/data1
+
+volumes:
+ datastore:
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
new file mode 100644
index 00000000..98a7d23b
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ image: busybox:latest
diff --git a/tests/fixtures/simple-composefile-volume-ready/files/example.txt b/tests/fixtures/simple-composefile-volume-ready/files/example.txt
new file mode 100644
index 00000000..edb4d339
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/files/example.txt
@@ -0,0 +1 @@
+FILE_CONTENT
diff --git a/tests/fixtures/simple-composefile/digest.yml b/tests/fixtures/simple-composefile/digest.yml
new file mode 100644
index 00000000..08f1d993
--- /dev/null
+++ b/tests/fixtures/simple-composefile/digest.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+digest:
+ image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
+ command: top
diff --git a/tests/fixtures/simple-composefile/docker-compose.yml b/tests/fixtures/simple-composefile/docker-compose.yml
new file mode 100644
index 00000000..b25beaf4
--- /dev/null
+++ b/tests/fixtures/simple-composefile/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/simple-composefile/ignore-pull-failures.yml b/tests/fixtures/simple-composefile/ignore-pull-failures.yml
new file mode 100644
index 00000000..a28f7922
--- /dev/null
+++ b/tests/fixtures/simple-composefile/ignore-pull-failures.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: nonexisting-image:latest
+ command: top
diff --git a/tests/fixtures/simple-dockerfile/Dockerfile b/tests/fixtures/simple-dockerfile/Dockerfile
new file mode 100644
index 00000000..dd864b83
--- /dev/null
+++ b/tests/fixtures/simple-dockerfile/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+CMD echo "success"
diff --git a/tests/fixtures/simple-dockerfile/docker-compose.yml b/tests/fixtures/simple-dockerfile/docker-compose.yml
new file mode 100644
index 00000000..b0357541
--- /dev/null
+++ b/tests/fixtures/simple-dockerfile/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ build: .
diff --git a/tests/fixtures/simple-failing-dockerfile/Dockerfile b/tests/fixtures/simple-failing-dockerfile/Dockerfile
new file mode 100644
index 00000000..c2d06b16
--- /dev/null
+++ b/tests/fixtures/simple-failing-dockerfile/Dockerfile
@@ -0,0 +1,7 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+LABEL com.docker.compose.test_failing_image=true
+# With the following label the container will be cleaned up automatically
+# Must be kept in sync with LABEL_PROJECT from compose/const.py
+LABEL com.docker.compose.project=composetest
+RUN exit 1
diff --git a/tests/fixtures/simple-failing-dockerfile/docker-compose.yml b/tests/fixtures/simple-failing-dockerfile/docker-compose.yml
new file mode 100644
index 00000000..b0357541
--- /dev/null
+++ b/tests/fixtures/simple-failing-dockerfile/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ build: .
diff --git a/tests/fixtures/sleeps-composefile/docker-compose.yml b/tests/fixtures/sleeps-composefile/docker-compose.yml
new file mode 100644
index 00000000..7c8d84f8
--- /dev/null
+++ b/tests/fixtures/sleeps-composefile/docker-compose.yml
@@ -0,0 +1,10 @@
+
+version: "2"
+
+services:
+ simple:
+ image: busybox:latest
+ command: sleep 200
+ another:
+ image: busybox:latest
+ command: sleep 200
diff --git a/tests/fixtures/stop-signal-composefile/docker-compose.yml b/tests/fixtures/stop-signal-composefile/docker-compose.yml
new file mode 100644
index 00000000..04f58aa9
--- /dev/null
+++ b/tests/fixtures/stop-signal-composefile/docker-compose.yml
@@ -0,0 +1,10 @@
+simple:
+ image: busybox:latest
+ command:
+ - sh
+ - '-c'
+ - |
+ trap 'exit 0' SIGINT
+ trap 'exit 1' SIGTERM
+ while true; do :; done
+ stop_signal: SIGINT
diff --git a/tests/fixtures/tls/ca.pem b/tests/fixtures/tls/ca.pem
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/tls/ca.pem
diff --git a/tests/fixtures/tls/cert.pem b/tests/fixtures/tls/cert.pem
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/tls/cert.pem
diff --git a/tests/fixtures/tls/key.key b/tests/fixtures/tls/key.key
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/tls/key.key
diff --git a/tests/fixtures/top/docker-compose.yml b/tests/fixtures/top/docker-compose.yml
new file mode 100644
index 00000000..d632a836
--- /dev/null
+++ b/tests/fixtures/top/docker-compose.yml
@@ -0,0 +1,6 @@
+service_a:
+ image: busybox:latest
+ command: top
+service_b:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/unicode-environment/docker-compose.yml b/tests/fixtures/unicode-environment/docker-compose.yml
new file mode 100644
index 00000000..a41af4f0
--- /dev/null
+++ b/tests/fixtures/unicode-environment/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '2'
+services:
+ simple:
+ image: busybox:latest
+ command: sh -c 'echo $$FOO'
+ environment:
+ FOO: ${BAR}
diff --git a/tests/fixtures/user-composefile/docker-compose.yml b/tests/fixtures/user-composefile/docker-compose.yml
new file mode 100644
index 00000000..3eb7d397
--- /dev/null
+++ b/tests/fixtures/user-composefile/docker-compose.yml
@@ -0,0 +1,4 @@
+service:
+ image: busybox:latest
+ user: notauser
+ command: id
diff --git a/tests/fixtures/v1-config/docker-compose.yml b/tests/fixtures/v1-config/docker-compose.yml
new file mode 100644
index 00000000..8646c4ed
--- /dev/null
+++ b/tests/fixtures/v1-config/docker-compose.yml
@@ -0,0 +1,10 @@
+net:
+ image: busybox
+volume:
+ image: busybox
+ volumes:
+ - /data
+app:
+ image: busybox
+ net: "container:net"
+ volumes_from: ["volume"]
diff --git a/tests/fixtures/v2-dependencies/docker-compose.yml b/tests/fixtures/v2-dependencies/docker-compose.yml
new file mode 100644
index 00000000..2e14b94b
--- /dev/null
+++ b/tests/fixtures/v2-dependencies/docker-compose.yml
@@ -0,0 +1,13 @@
+version: "2.0"
+services:
+ db:
+ image: busybox:latest
+ command: top
+ web:
+ image: busybox:latest
+ command: top
+ depends_on:
+ - db
+ console:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/v2-full/Dockerfile b/tests/fixtures/v2-full/Dockerfile
new file mode 100644
index 00000000..51ed0d90
--- /dev/null
+++ b/tests/fixtures/v2-full/Dockerfile
@@ -0,0 +1,4 @@
+
+FROM busybox:latest
+RUN echo something
+CMD top
diff --git a/tests/fixtures/v2-full/docker-compose.yml b/tests/fixtures/v2-full/docker-compose.yml
new file mode 100644
index 00000000..a973dd0c
--- /dev/null
+++ b/tests/fixtures/v2-full/docker-compose.yml
@@ -0,0 +1,24 @@
+
+version: "2"
+
+volumes:
+ data:
+ driver: local
+
+networks:
+ front: {}
+
+services:
+ web:
+ build: .
+ networks:
+ - front
+ - default
+ volumes_from:
+ - other
+
+ other:
+ image: busybox:latest
+ command: top
+ volumes:
+ - /data
diff --git a/tests/fixtures/v2-simple/docker-compose.yml b/tests/fixtures/v2-simple/docker-compose.yml
new file mode 100644
index 00000000..c99ae02f
--- /dev/null
+++ b/tests/fixtures/v2-simple/docker-compose.yml
@@ -0,0 +1,8 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/v2-simple/links-invalid.yml b/tests/fixtures/v2-simple/links-invalid.yml
new file mode 100644
index 00000000..481aa404
--- /dev/null
+++ b/tests/fixtures/v2-simple/links-invalid.yml
@@ -0,0 +1,10 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ links:
+ - another
+ another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/v3-full/docker-compose.yml b/tests/fixtures/v3-full/docker-compose.yml
new file mode 100644
index 00000000..2bc0e248
--- /dev/null
+++ b/tests/fixtures/v3-full/docker-compose.yml
@@ -0,0 +1,57 @@
+version: "3.2"
+services:
+ web:
+ image: busybox
+
+ deploy:
+ mode: replicated
+ replicas: 6
+ labels: [FOO=BAR]
+ update_config:
+ parallelism: 3
+ delay: 10s
+ failure_action: continue
+ monitor: 60s
+ max_failure_ratio: 0.3
+ resources:
+ limits:
+ cpus: '0.001'
+ memory: 50M
+ reservations:
+ cpus: '0.0001'
+ memory: 20M
+ restart_policy:
+ condition: on_failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+ placement:
+ constraints: [node=foo]
+
+ healthcheck:
+ test: cat /etc/passwd
+ interval: 10s
+ timeout: 1s
+ retries: 5
+
+ volumes:
+ - source: /host/path
+ target: /container/path
+ type: bind
+ read_only: true
+ - source: foobar
+ type: volume
+ target: /container/volumepath
+ - type: volume
+ target: /anonymous
+ - type: volume
+ source: foobar
+ target: /container/volumepath2
+ volume:
+ nocopy: true
+
+ stop_grace_period: 20s
+volumes:
+ foobar:
+ labels:
+ com.docker.compose.test: 'true'
diff --git a/tests/fixtures/volume-path-interpolation/docker-compose.yml b/tests/fixtures/volume-path-interpolation/docker-compose.yml
new file mode 100644
index 00000000..6d4e236a
--- /dev/null
+++ b/tests/fixtures/volume-path-interpolation/docker-compose.yml
@@ -0,0 +1,5 @@
+test:
+ image: busybox
+ command: top
+ volumes:
+ - "~/${VOLUME_NAME}:/container-path"
diff --git a/tests/fixtures/volume-path/common/services.yml b/tests/fixtures/volume-path/common/services.yml
new file mode 100644
index 00000000..2dbf7596
--- /dev/null
+++ b/tests/fixtures/volume-path/common/services.yml
@@ -0,0 +1,5 @@
+db:
+ image: busybox
+ volumes:
+ - ./foo:/foo
+ - ./bar:/bar
diff --git a/tests/fixtures/volume-path/docker-compose.yml b/tests/fixtures/volume-path/docker-compose.yml
new file mode 100644
index 00000000..af433c52
--- /dev/null
+++ b/tests/fixtures/volume-path/docker-compose.yml
@@ -0,0 +1,6 @@
+db:
+ extends:
+ file: common/services.yml
+ service: db
+ volumes:
+ - ./bar:/bar
diff --git a/tests/fixtures/volume/docker-compose.yml b/tests/fixtures/volume/docker-compose.yml
new file mode 100644
index 00000000..4335b0a0
--- /dev/null
+++ b/tests/fixtures/volume/docker-compose.yml
@@ -0,0 +1,11 @@
+version: '2'
+services:
+ test:
+ image: busybox
+ command: top
+ volumes:
+ - /container-path
+ - testvolume:/container-named-path
+
+volumes:
+ testvolume: {}
diff --git a/tests/fixtures/volumes-from-container/docker-compose.yml b/tests/fixtures/volumes-from-container/docker-compose.yml
new file mode 100644
index 00000000..495fcaae
--- /dev/null
+++ b/tests/fixtures/volumes-from-container/docker-compose.yml
@@ -0,0 +1,5 @@
+version: "2"
+services:
+ test:
+ image: busybox
+ volumes_from: ["container:composetest_data_container"]
diff --git a/tests/fixtures/volumes/docker-compose.yml b/tests/fixtures/volumes/docker-compose.yml
new file mode 100644
index 00000000..da711ac4
--- /dev/null
+++ b/tests/fixtures/volumes/docker-compose.yml
@@ -0,0 +1,2 @@
+version: '2.1'
+services: {}
diff --git a/tests/fixtures/volumes/external-volumes-v2-x.yml b/tests/fixtures/volumes/external-volumes-v2-x.yml
new file mode 100644
index 00000000..3b736c5f
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v2-x.yml
@@ -0,0 +1,17 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v2.yml b/tests/fixtures/volumes/external-volumes-v2.yml
new file mode 100644
index 00000000..4025b53b
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v2.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v3-4.yml b/tests/fixtures/volumes/external-volumes-v3-4.yml
new file mode 100644
index 00000000..76c8421d
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v3-4.yml
@@ -0,0 +1,17 @@
+version: "3.4"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v3-x.yml b/tests/fixtures/volumes/external-volumes-v3-x.yml
new file mode 100644
index 00000000..903fee64
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v3-x.yml
@@ -0,0 +1,16 @@
+version: "3.0"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/volume-label.yml b/tests/fixtures/volumes/volume-label.yml
new file mode 100644
index 00000000..a5f33a5a
--- /dev/null
+++ b/tests/fixtures/volumes/volume-label.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - volume_with_label:/data
+
+volumes:
+ volume_with_label:
+ labels:
+ - "label_key=label_val"
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 00000000..a93de993
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,50 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+
+from compose.config.config import ConfigDetails
+from compose.config.config import ConfigFile
+from compose.config.config import load
+
+
+def build_config(contents, **kwargs):
+ return load(build_config_details(contents, **kwargs))
+
+
+def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
+ return ConfigDetails(
+ working_dir,
+ [ConfigFile(filename, contents)],
+ )
+
+
+def create_host_file(client, filename):
+ dirname = os.path.dirname(filename)
+
+ with open(filename, 'r') as fh:
+ content = fh.read()
+
+ container = client.create_container(
+ 'busybox:latest',
+ ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
+ volumes={dirname: {}},
+ host_config=client.create_host_config(
+ binds={dirname: {'bind': dirname, 'ro': False}},
+ network_mode='none',
+ ),
+ )
+ try:
+ client.start(container)
+ exitcode = client.wait(container)
+
+ if exitcode != 0:
+ output = client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}".format(exitcode, output))
+
+ container_info = client.inspect_container(container)
+ if 'Node' in container_info:
+ return container_info['Node']['Name']
+ finally:
+ client.remove_container(container, force=True)
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/__init__.py
diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py
new file mode 100644
index 00000000..2ff610fb
--- /dev/null
+++ b/tests/integration/network_test.py
@@ -0,0 +1,17 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .testcases import DockerClientTestCase
+from compose.const import LABEL_NETWORK
+from compose.const import LABEL_PROJECT
+from compose.network import Network
+
+
+class NetworkTest(DockerClientTestCase):
+ def test_network_default_labels(self):
+ net = Network(self.client, 'composetest', 'foonet')
+ net.ensure()
+ net_data = net.inspect()
+ labels = net_data['Labels']
+ assert labels[LABEL_NETWORK] == net.name
+ assert labels[LABEL_PROJECT] == net.project
diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py
new file mode 100644
index 00000000..953dd52b
--- /dev/null
+++ b/tests/integration/project_test.py
@@ -0,0 +1,1636 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os.path
+import random
+
+import py
+import pytest
+from docker.errors import APIError
+from docker.errors import NotFound
+
+from .. import mock
+from ..helpers import build_config as load_config
+from ..helpers import create_host_file
+from .testcases import DockerClientTestCase
+from .testcases import SWARM_SKIP_CONTAINERS_ALL
+from compose.config import config
+from compose.config import ConfigurationError
+from compose.config import types
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_SERVICE
+from compose.container import Container
+from compose.errors import HealthCheckFailed
+from compose.errors import NoHealthCheckConfigured
+from compose.project import Project
+from compose.project import ProjectError
+from compose.service import ConvergenceStrategy
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+
+def build_config(**kwargs):
+ return config.Config(
+ version=kwargs.get('version'),
+ services=kwargs.get('services'),
+ volumes=kwargs.get('volumes'),
+ networks=kwargs.get('networks'),
+ secrets=kwargs.get('secrets'),
+ configs=kwargs.get('configs'),
+ )
+
+
+class ProjectTest(DockerClientTestCase):
+
+ def test_containers(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+
+ containers = project.containers()
+ self.assertEqual(len(containers), 2)
+
+ @pytest.mark.skipif(SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug')
+ def test_containers_stopped(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+ assert len(project.containers()) == 2
+ assert len(project.containers(stopped=True)) == 2
+
+ project.stop()
+ assert len(project.containers()) == 0
+ assert len(project.containers(stopped=True)) == 2
+
+ def test_containers_with_service_names(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+
+ containers = project.containers(['web'])
+ self.assertEqual(
+ [c.name for c in containers],
+ ['composetest_web_1'])
+
+ def test_containers_with_extra_service(self):
+ web = self.create_service('web')
+ web_1 = web.create_container()
+
+ db = self.create_service('db')
+ db_1 = db.create_container()
+
+ self.create_service('extra').create_container()
+
+ project = Project('composetest', [web, db], self.client)
+ self.assertEqual(
+ set(project.containers(stopped=True)),
+ set([web_1, db_1]),
+ )
+
+ def test_volumes_from_service(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'data': {
+ 'image': 'busybox:latest',
+ 'volumes': ['/var/data'],
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'volumes_from': ['data'],
+ },
+ }),
+ client=self.client,
+ )
+ db = project.get_service('db')
+ data = project.get_service('data')
+ self.assertEqual(db.volumes_from, [VolumeFromSpec(data, 'rw', 'service')])
+
+ def test_volumes_from_container(self):
+ data_container = Container.create(
+ self.client,
+ image='busybox:latest',
+ volumes=['/var/data'],
+ name='composetest_data_container',
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'db': {
+ 'image': 'busybox:latest',
+ 'volumes_from': ['composetest_data_container'],
+ },
+ }),
+ client=self.client,
+ )
+ db = project.get_service('db')
+ self.assertEqual(db._get_volumes_from(), [data_container.id + ':rw'])
+
+ @v2_only()
+ @no_cluster('container networks not supported in Swarm')
+ def test_network_mode_from_service(self):
+ project = Project.from_config(
+ name='composetest',
+ client=self.client,
+ config_data=load_config({
+ 'version': str(V2_0),
+ 'services': {
+ 'net': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'network_mode': 'service:net',
+ 'command': ["top"]
+ },
+ },
+ }),
+ )
+
+ project.up()
+
+ web = project.get_service('web')
+ net = project.get_service('net')
+ self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+
+ @v2_only()
+ @no_cluster('container networks not supported in Swarm')
+ def test_network_mode_from_container(self):
+ def get_project():
+ return Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'version': str(V2_0),
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'network_mode': 'container:composetest_net_container'
+ },
+ },
+ }),
+ client=self.client,
+ )
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ get_project()
+
+ assert "container 'composetest_net_container' which does not exist" in excinfo.exconly()
+
+ net_container = Container.create(
+ self.client,
+ image='busybox:latest',
+ name='composetest_net_container',
+ command='top',
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ )
+ net_container.start()
+
+ project = get_project()
+ project.up()
+
+ web = project.get_service('web')
+ self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+
+ @no_cluster('container networks not supported in Swarm')
+ def test_net_from_service_v1(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'net': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'net': 'container:net',
+ 'command': ["top"]
+ },
+ }),
+ client=self.client,
+ )
+
+ project.up()
+
+ web = project.get_service('web')
+ net = project.get_service('net')
+ self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+
+ @no_cluster('container networks not supported in Swarm')
+ def test_net_from_container_v1(self):
+ def get_project():
+ return Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'web': {
+ 'image': 'busybox:latest',
+ 'net': 'container:composetest_net_container'
+ },
+ }),
+ client=self.client,
+ )
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ get_project()
+
+ assert "container 'composetest_net_container' which does not exist" in excinfo.exconly()
+
+ net_container = Container.create(
+ self.client,
+ image='busybox:latest',
+ name='composetest_net_container',
+ command='top',
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ )
+ net_container.start()
+
+ project = get_project()
+ project.up()
+
+ web = project.get_service('web')
+ self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+
+ def test_start_pause_unpause_stop_kill_remove(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.start()
+
+ self.assertEqual(len(web.containers()), 0)
+ self.assertEqual(len(db.containers()), 0)
+
+ web_container_1 = web.create_container()
+ web_container_2 = web.create_container()
+ db_container = db.create_container()
+
+ project.start(service_names=['web'])
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running),
+ set([web_container_1.name, web_container_2.name]))
+
+ project.start()
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running),
+ set([web_container_1.name, web_container_2.name, db_container.name]))
+
+ project.pause(service_names=['web'])
+ self.assertEqual(
+ set([c.name for c in project.containers() if c.is_paused]),
+ set([web_container_1.name, web_container_2.name]))
+
+ project.pause()
+ self.assertEqual(
+ set([c.name for c in project.containers() if c.is_paused]),
+ set([web_container_1.name, web_container_2.name, db_container.name]))
+
+ project.unpause(service_names=['db'])
+ self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 2)
+
+ project.unpause()
+ self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 0)
+
+ project.stop(service_names=['web'], timeout=1)
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running), set([db_container.name])
+ )
+
+ project.kill(service_names=['db'])
+ self.assertEqual(len([c for c in project.containers() if c.is_running]), 0)
+ self.assertEqual(len(project.containers(stopped=True)), 3)
+
+ project.remove_stopped(service_names=['web'])
+ self.assertEqual(len(project.containers(stopped=True)), 1)
+
+ project.remove_stopped()
+ self.assertEqual(len(project.containers(stopped=True)), 0)
+
+ def test_create(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+
+ project.create(['db'])
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ assert not containers[0].is_running
+ db_containers = db.containers(stopped=True)
+ assert len(db_containers) == 1
+ assert not db_containers[0].is_running
+ assert len(web.containers(stopped=True)) == 0
+
+ def test_create_twice(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+
+ project.create(['db', 'web'])
+ project.create(['db', 'web'])
+ containers = project.containers(stopped=True)
+ assert len(containers) == 2
+ db_containers = db.containers(stopped=True)
+ assert len(db_containers) == 1
+ assert not db_containers[0].is_running
+ web_containers = web.containers(stopped=True)
+ assert len(web_containers) == 1
+ assert not web_containers[0].is_running
+
+ def test_create_with_links(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, 'db')])
+ project = Project('composetest', [db, web], self.client)
+
+ project.create(['web'])
+ # self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers(stopped=True)) == 2
+ assert not [c for c in project.containers(stopped=True) if c.is_running]
+ assert len(db.containers(stopped=True)) == 1
+ assert len(web.containers(stopped=True)) == 1
+
+ def test_create_strategy_always(self):
+ db = self.create_service('db')
+ project = Project('composetest', [db], self.client)
+ project.create(['db'])
+ old_id = project.containers(stopped=True)[0].id
+
+ project.create(['db'], strategy=ConvergenceStrategy.always)
+ assert len(project.containers(stopped=True)) == 1
+
+ db_container = project.containers(stopped=True)[0]
+ assert not db_container.is_running
+ assert db_container.id != old_id
+
+ def test_create_strategy_never(self):
+ db = self.create_service('db')
+ project = Project('composetest', [db], self.client)
+ project.create(['db'])
+ old_id = project.containers(stopped=True)[0].id
+
+ project.create(['db'], strategy=ConvergenceStrategy.never)
+ assert len(project.containers(stopped=True)) == 1
+
+ db_container = project.containers(stopped=True)[0]
+ assert not db_container.is_running
+ assert db_container.id == old_id
+
+ def test_project_up(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(web.containers()), 0)
+
+ def test_project_up_starts_uncreated_services(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, 'db')])
+ project = Project('composetest', [db, web], self.client)
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+
+ project.up()
+ self.assertEqual(len(project.containers()), 2)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(web.containers()), 1)
+
+ def test_recreate_preserves_volumes(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/etc')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+ old_db_id = project.containers()[0].id
+ db_volume_path = project.containers()[0].get('Volumes./etc')
+
+ project.up(strategy=ConvergenceStrategy.always)
+ self.assertEqual(len(project.containers()), 2)
+
+ db_container = [c for c in project.containers() if 'db' in c.name][0]
+ self.assertNotEqual(db_container.id, old_db_id)
+ self.assertEqual(db_container.get('Volumes./etc'), db_volume_path)
+
+ def test_project_up_with_no_recreate_running(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+ old_db_id = project.containers()[0].id
+ container, = project.containers()
+ db_volume_path = container.get_mount('/var/db')['Source']
+
+ project.up(strategy=ConvergenceStrategy.never)
+ self.assertEqual(len(project.containers()), 2)
+
+ db_container = [c for c in project.containers() if 'db' in c.name][0]
+ self.assertEqual(db_container.id, old_db_id)
+ self.assertEqual(
+ db_container.get_mount('/var/db')['Source'],
+ db_volume_path)
+
+ def test_project_up_with_no_recreate_stopped(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ project.kill()
+
+ old_containers = project.containers(stopped=True)
+
+ self.assertEqual(len(old_containers), 1)
+ old_container, = old_containers
+ old_db_id = old_container.id
+ db_volume_path = old_container.get_mount('/var/db')['Source']
+
+ project.up(strategy=ConvergenceStrategy.never)
+
+ new_containers = project.containers(stopped=True)
+ self.assertEqual(len(new_containers), 2)
+ self.assertEqual([c.is_running for c in new_containers], [True, True])
+
+ db_container = [c for c in new_containers if 'db' in c.name][0]
+ self.assertEqual(db_container.id, old_db_id)
+ self.assertEqual(
+ db_container.get_mount('/var/db')['Source'],
+ db_volume_path)
+
+ def test_project_up_without_all_services(self):
+ console = self.create_service('console')
+ db = self.create_service('db')
+ project = Project('composetest', [console, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up()
+ self.assertEqual(len(project.containers()), 2)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 1)
+
+ def test_project_up_starts_links(self):
+ console = self.create_service('console')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ web = self.create_service('web', links=[(db, 'db')])
+
+ project = Project('composetest', [web, db, console], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['web'])
+ self.assertEqual(len(project.containers()), 2)
+ self.assertEqual(len(web.containers()), 1)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ def test_project_up_starts_depends(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'console': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ },
+ 'data': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'volumes_from': ['data'],
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'links': ['db'],
+ },
+ }),
+ client=self.client,
+ )
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['web'])
+ self.assertEqual(len(project.containers()), 3)
+ self.assertEqual(len(project.get_service('web').containers()), 1)
+ self.assertEqual(len(project.get_service('db').containers()), 1)
+ self.assertEqual(len(project.get_service('data').containers()), 1)
+ self.assertEqual(len(project.get_service('console').containers()), 0)
+
+ def test_project_up_with_no_deps(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'console': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ },
+ 'data': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'volumes_from': ['data'],
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'links': ['db'],
+ },
+ }),
+ client=self.client,
+ )
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'], start_deps=False)
+ self.assertEqual(len(project.containers(stopped=True)), 2)
+ self.assertEqual(len(project.get_service('web').containers()), 0)
+ self.assertEqual(len(project.get_service('db').containers()), 1)
+ self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
+ assert not project.get_service('data').containers(stopped=True)[0].is_running
+ self.assertEqual(len(project.get_service('console').containers()), 0)
+
+ def test_project_up_recreate_with_tmpfs_volume(self):
+ # https://github.com/docker/compose/issues/4751
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'version': '2.1',
+ 'services': {
+ 'foo': {
+ 'image': 'busybox:latest',
+ 'tmpfs': ['/dev/shm'],
+ 'volumes': ['/dev/shm']
+ }
+ }
+ }), client=self.client
+ )
+ project.up()
+ project.up(strategy=ConvergenceStrategy.always)
+
+    def test_unscale_after_restart(self):
+        # After manually scaling a service past its configured size, a plain
+        # `project.up()` must shrink it back to the configured scale of 1,
+        # unless an explicit scale_override is given.
+        web = self.create_service('web')
+        project = Project('composetest', [web], self.client)
+
+        project.start()
+
+        service = project.get_service('web')
+        service.scale(1)
+        self.assertEqual(len(service.containers()), 1)
+        service.scale(3)
+        self.assertEqual(len(service.containers()), 3)
+        project.up()
+        service = project.get_service('web')
+        self.assertEqual(len(service.containers()), 1)
+        service.scale(1)
+        self.assertEqual(len(service.containers()), 1)
+        project.up(scale_override={'web': 3})
+        service = project.get_service('web')
+        self.assertEqual(len(service.containers()), 3)
+        # NOTE(review): does scale=0 make any sense here? After `up()`
+        # recreates the service, at least one container is running again.
+        service.scale(0)
+        project.up()
+        service = project.get_service('web')
+        self.assertEqual(len(service.containers()), 1)
+
+    @v2_only()
+    def test_project_up_networks(self):
+        # A service attached to several networks gets a 'web' alias on each
+        # network plus any extra aliases declared in the config, and each
+        # network is created with the project-name prefix.
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top',
+                'networks': {
+                    'foo': None,
+                    'bar': None,
+                    'baz': {'aliases': ['extra']},
+                },
+            }],
+            networks={
+                'foo': {'driver': 'bridge'},
+                'bar': {'driver': None},
+                'baz': {},
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up()
+
+        containers = project.containers()
+        assert len(containers) == 1
+        container, = containers
+
+        for net_name in ['foo', 'bar', 'baz']:
+            full_net_name = 'composetest_{}'.format(net_name)
+            network_data = self.client.inspect_network(full_net_name)
+            assert network_data['Name'] == full_net_name
+
+        aliases_key = 'NetworkSettings.Networks.{net}.Aliases'
+        assert 'web' in container.get(aliases_key.format(net='composetest_foo'))
+        assert 'web' in container.get(aliases_key.format(net='composetest_baz'))
+        assert 'extra' in container.get(aliases_key.format(net='composetest_baz'))
+
+        # An explicitly requested driver is honored.
+        foo_data = self.client.inspect_network('composetest_foo')
+        assert foo_data['Driver'] == 'bridge'
+
+    @v2_only()
+    def test_up_with_ipam_config(self):
+        # Driver options and the full IPAM block (subnet, ip_range, gateway,
+        # aux_addresses) from the compose file must be applied verbatim to
+        # the engine-created network.
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {'front': None},
+            }],
+            networks={
+                'front': {
+                    'driver': 'bridge',
+                    'driver_opts': {
+                        "com.docker.network.bridge.enable_icc": "false",
+                    },
+                    'ipam': {
+                        'driver': 'default',
+                        'config': [{
+                            "subnet": "172.28.0.0/16",
+                            "ip_range": "172.28.5.0/24",
+                            "gateway": "172.28.5.254",
+                            "aux_addresses": {
+                                "a": "172.28.1.5",
+                                "b": "172.28.1.6",
+                                "c": "172.28.1.7",
+                            },
+                        }],
+                    },
+                },
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up()
+
+        network = self.client.networks(names=['composetest_front'])[0]
+
+        assert network['Options'] == {
+            "com.docker.network.bridge.enable_icc": "false"
+        }
+
+        # Engine reports the IPAM config with CamelCase keys.
+        assert network['IPAM'] == {
+            'Driver': 'default',
+            'Options': None,
+            'Config': [{
+                'Subnet': "172.28.0.0/16",
+                'IPRange': "172.28.5.0/24",
+                'Gateway': "172.28.5.254",
+                'AuxiliaryAddresses': {
+                    'a': '172.28.1.5',
+                    'b': '172.28.1.6',
+                    'c': '172.28.1.7',
+                },
+            }],
+        }
+
+    @v2_only()
+    def test_up_with_ipam_options(self):
+        # IPAM driver options declared in the network config must be passed
+        # through to the created network.
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {'front': None},
+            }],
+            networks={
+                'front': {
+                    'driver': 'bridge',
+                    'ipam': {
+                        'driver': 'default',
+                        'options': {
+                            "com.docker.compose.network.test": "9-29-045"
+                        }
+                    },
+                },
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up()
+
+        network = self.client.networks(names=['composetest_front'])[0]
+
+        assert network['IPAM']['Options'] == {
+            "com.docker.compose.network.test": "9-29-045"
+        }
+
+ @v2_1_only()
+ def test_up_with_network_static_addresses(self):
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'networks': {
+ 'static_test': {
+ 'ipv4_address': '172.16.100.100',
+ 'ipv6_address': 'fe80::1001:102'
+ }
+ },
+ }],
+ networks={
+ 'static_test': {
+ 'driver': 'bridge',
+ 'driver_opts': {
+ "com.docker.network.enable_ipv6": "true",
+ },
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {"subnet": "172.16.100.0/24",
+ "gateway": "172.16.100.1"},
+ {"subnet": "fe80::/64",
+ "gateway": "fe80::1001:1"}
+ ]
+ },
+ 'enable_ipv6': True,
+ }
+ }
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up(detached=True)
+
+ service_container = project.get_service('web').containers()[0]
+
+ IPAMConfig = (service_container.inspect().get('NetworkSettings', {}).
+ get('Networks', {}).get('composetest_static_test', {}).
+ get('IPAMConfig', {}))
+ assert IPAMConfig.get('IPv4Address') == '172.16.100.100'
+ assert IPAMConfig.get('IPv6Address') == 'fe80::1001:102'
+
+    @v2_1_only()
+    def test_up_with_enable_ipv6(self):
+        # `enable_ipv6: true` on a network must be reported by the engine,
+        # and a static IPv6 address must land in the container's IPAMConfig.
+        self.require_api_version('1.23')
+        config_data = build_config(
+            version=V2_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top',
+                'networks': {
+                    'static_test': {
+                        'ipv6_address': 'fe80::1001:102'
+                    }
+                },
+            }],
+            networks={
+                'static_test': {
+                    'driver': 'bridge',
+                    'enable_ipv6': True,
+                    'ipam': {
+                        'driver': 'default',
+                        'config': [
+                            {"subnet": "fe80::/64",
+                             "gateway": "fe80::1001:1"}
+                        ]
+                    }
+                }
+            }
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up(detached=True)
+        network = [n for n in self.client.networks() if 'static_test' in n['Name']][0]
+        service_container = project.get_service('web').containers()[0]
+
+        assert network['EnableIPv6'] is True
+        ipam_config = (service_container.inspect().get('NetworkSettings', {}).
+                       get('Networks', {}).get('composetest_static_test', {}).
+                       get('IPAMConfig', {}))
+        assert ipam_config.get('IPv6Address') == 'fe80::1001:102'
+
+    @v2_only()
+    def test_up_with_network_static_addresses_missing_subnet(self):
+        # Requesting static addresses on a network whose IPAM block has no
+        # subnet configuration must fail with a ProjectError.
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {
+                    'static_test': {
+                        'ipv4_address': '172.16.100.100',
+                        'ipv6_address': 'fe80::1001:101'
+                    }
+                },
+            }],
+            networks={
+                'static_test': {
+                    'driver': 'bridge',
+                    'driver_opts': {
+                        "com.docker.network.enable_ipv6": "true",
+                    },
+                    'ipam': {
+                        'driver': 'default',
+                    },
+                },
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+
+        with self.assertRaises(ProjectError):
+            project.up()
+
+    @v2_1_only()
+    def test_up_with_network_link_local_ips(self):
+        # `link_local_ips` from the service's network config must be present
+        # in the container's per-network IPAMConfig.
+        config_data = build_config(
+            version=V2_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {
+                    'linklocaltest': {
+                        'link_local_ips': ['169.254.8.8']
+                    }
+                }
+            }],
+            networks={
+                'linklocaltest': {'driver': 'bridge'}
+            }
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+        project.up(detached=True)
+
+        service_container = project.get_service('web').containers(stopped=True)[0]
+        ipam_config = service_container.inspect().get(
+            'NetworkSettings', {}
+        ).get(
+            'Networks', {}
+        ).get(
+            'composetest_linklocaltest', {}
+        ).get('IPAMConfig', {})
+        assert 'LinkLocalIPs' in ipam_config
+        assert ipam_config['LinkLocalIPs'] == ['169.254.8.8']
+
+    @v2_1_only()
+    def test_up_with_isolation(self):
+        # The `isolation` service option must be forwarded to the container's
+        # HostConfig (API >= 1.24).
+        self.require_api_version('1.24')
+        config_data = build_config(
+            version=V2_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'isolation': 'default'
+            }],
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+        project.up(detached=True)
+        service_container = project.get_service('web').containers(stopped=True)[0]
+        assert service_container.inspect()['HostConfig']['Isolation'] == 'default'
+
+    @v2_1_only()
+    def test_up_with_invalid_isolation(self):
+        # An isolation value the engine rejects must surface as ProjectError.
+        self.require_api_version('1.24')
+        config_data = build_config(
+            version=V2_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'isolation': 'foobar'
+            }],
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+        with self.assertRaises(ProjectError):
+            project.up()
+
+    @v2_only()
+    def test_project_up_with_network_internal(self):
+        # `internal: true` in the network config must create an internal
+        # (no external connectivity) network.
+        self.require_api_version('1.23')
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {'internal': None},
+            }],
+            networks={
+                'internal': {'driver': 'bridge', 'internal': True},
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up()
+
+        network = self.client.networks(names=['composetest_internal'])[0]
+
+        assert network['Internal'] is True
+
+    @v2_1_only()
+    def test_project_up_with_network_label(self):
+        # Labels declared on a network in the compose file must be set on
+        # the engine-created network.
+        self.require_api_version('1.23')
+
+        network_name = 'network_with_label'
+
+        config_data = build_config(
+            version=V2_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {network_name: None}
+            }],
+            networks={
+                network_name: {'labels': {'label_key': 'label_val'}}
+            }
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+
+        project.up()
+
+        networks = [
+            n for n in self.client.networks()
+            if n['Name'].startswith('composetest_')
+        ]
+
+        assert [n['Name'] for n in networks] == ['composetest_{}'.format(network_name)]
+        assert 'label_key' in networks[0]['Labels']
+        assert networks[0]['Labels']['label_key'] == 'label_val'
+
+    @v2_only()
+    def test_project_up_volumes(self):
+        # Top-level volumes are created with the project prefix and the
+        # requested driver. Random name avoids collisions between runs.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={vol_name: {'driver': 'local'}},
+        )
+
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        project.up()
+        self.assertEqual(len(project.containers()), 1)
+
+        volume_data = self.get_volume_data(full_vol_name)
+        # split('/')[-1] strips the node prefix a Swarm cluster may add.
+        assert volume_data['Name'].split('/')[-1] == full_vol_name
+        self.assertEqual(volume_data['Driver'], 'local')
+
+    @v2_1_only()
+    def test_project_up_with_volume_labels(self):
+        # Labels declared on a top-level volume must be set on the
+        # engine-created volume.
+        self.require_api_version('1.23')
+
+        volume_name = 'volume_with_label'
+
+        config_data = build_config(
+            version=V2_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'volumes': [VolumeSpec.parse('{}:/data'.format(volume_name))]
+            }],
+            volumes={
+                volume_name: {
+                    'labels': {
+                        'label_key': 'label_val'
+                    }
+                }
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+
+        project.up()
+
+        # split('/')[-1] strips the node prefix a Swarm cluster may add.
+        volumes = [
+            v for v in self.client.volumes().get('Volumes', [])
+            if v['Name'].split('/')[-1].startswith('composetest_')
+        ]
+
+        assert set([v['Name'].split('/')[-1] for v in volumes]) == set(
+            ['composetest_{}'.format(volume_name)]
+        )
+
+        assert 'label_key' in volumes[0]['Labels']
+        assert volumes[0]['Labels']['label_key'] == 'label_val'
+
+    @v2_only()
+    def test_project_up_logging_with_multiple_files(self):
+        # An override file's logging driver must replace the base file's
+        # whole logging section ('none' wins over 'json-file' + options).
+        base_file = config.ConfigFile(
+            'base.yml',
+            {
+                'version': str(V2_0),
+                'services': {
+                    'simple': {'image': 'busybox:latest', 'command': 'top'},
+                    'another': {
+                        'image': 'busybox:latest',
+                        'command': 'top',
+                        'logging': {
+                            'driver': "json-file",
+                            'options': {
+                                'max-size': "10m"
+                            }
+                        }
+                    }
+                }
+
+            })
+        override_file = config.ConfigFile(
+            'override.yml',
+            {
+                'version': str(V2_0),
+                'services': {
+                    'another': {
+                        'logging': {
+                            'driver': "none"
+                        }
+                    }
+                }
+
+            })
+        details = config.ConfigDetails('.', [base_file, override_file])
+
+        # Load from a temp cwd so relative paths in the config resolve there.
+        tmpdir = py.test.ensuretemp('logging_test')
+        self.addCleanup(tmpdir.remove)
+        with tmpdir.as_cwd():
+            config_data = config.load(details)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        containers = project.containers()
+        self.assertEqual(len(containers), 2)
+
+        another = project.get_service('another').containers()[0]
+        log_config = another.get('HostConfig.LogConfig')
+        self.assertTrue(log_config)
+        self.assertEqual(log_config.get('Type'), 'none')
+
+    @v2_only()
+    def test_project_up_port_mappings_with_multiple_files(self):
+        # The same port mapping declared in both the base and override file
+        # must be de-duplicated, not treated as a conflict.
+        base_file = config.ConfigFile(
+            'base.yml',
+            {
+                'version': str(V2_0),
+                'services': {
+                    'simple': {
+                        'image': 'busybox:latest',
+                        'command': 'top',
+                        'ports': ['1234:1234']
+                    },
+                },
+
+            })
+        override_file = config.ConfigFile(
+            'override.yml',
+            {
+                'version': str(V2_0),
+                'services': {
+                    'simple': {
+                        'ports': ['1234:1234']
+                    }
+                }
+
+            })
+        details = config.ConfigDetails('.', [base_file, override_file])
+
+        config_data = config.load(details)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        containers = project.containers()
+        self.assertEqual(len(containers), 1)
+
+    @v2_2_only()
+    def test_project_up_config_scale(self):
+        # The v2.2 `scale` option sets the default container count; an
+        # explicit scale_override wins, and a fresh `up` after `stop`
+        # returns to the configured scale.
+        config_data = build_config(
+            version=V2_2,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top',
+                'scale': 3
+            }]
+        )
+
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        assert len(project.containers()) == 3
+
+        project.up(scale_override={'web': 2})
+        assert len(project.containers()) == 2
+
+        project.up(scale_override={'web': 4})
+        assert len(project.containers()) == 4
+
+        project.stop()
+        project.up()
+        assert len(project.containers()) == 3
+
+    @v2_only()
+    def test_initialize_volumes(self):
+        # `volumes.initialize()` creates declared volumes (with the 'local'
+        # default driver) without bringing any service up.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={vol_name: {}},
+        )
+
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        project.volumes.initialize()
+
+        volume_data = self.get_volume_data(full_vol_name)
+        assert volume_data['Name'].split('/')[-1] == full_vol_name
+        assert volume_data['Driver'] == 'local'
+
+    @v2_only()
+    def test_project_up_implicit_volume_driver(self):
+        # A volume declared with no driver must default to 'local' when the
+        # project is brought up.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={vol_name: {}},
+        )
+
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        project.up()
+
+        volume_data = self.get_volume_data(full_vol_name)
+        assert volume_data['Name'].split('/')[-1] == full_vol_name
+        self.assertEqual(volume_data['Driver'], 'local')
+
+    @v3_only()
+    def test_project_up_with_secrets(self):
+        # A file-based secret must be mounted at /run/secrets/<target> and be
+        # readable by the service command.
+        # create_host_file pins the fixture onto one node for cluster runs;
+        # the constraint env var routes the container to that same node.
+        node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default'))
+
+        config_data = build_config(
+            version=V3_1,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'cat /run/secrets/special',
+                'secrets': [
+                    types.ServiceSecret.parse({'source': 'super', 'target': 'special'}),
+                ],
+                'environment': ['constraint:node=={}'.format(node if node is not None else '*')]
+            }],
+            secrets={
+                'super': {
+                    'file': os.path.abspath('tests/fixtures/secrets/default'),
+                },
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up()
+        project.stop()
+
+        containers = project.containers(stopped=True)
+        assert len(containers) == 1
+        container, = containers
+
+        output = container.logs()
+        assert output == b"This is the secret\n"
+
+    @v2_only()
+    def test_initialize_volumes_invalid_volume_driver(self):
+        # An unknown volume driver must fail at initialization time.
+        # Swarm Classic surfaces the failure as a raw APIError instead of
+        # compose's ConfigurationError, hence the conditional.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={vol_name: {'driver': 'foobar'}},
+        )
+
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        with self.assertRaises(APIError if is_cluster(self.client) else config.ConfigurationError):
+            project.volumes.initialize()
+
+    @v2_only()
+    @no_cluster('inspect volume by name defect on Swarm Classic')
+    def test_initialize_volumes_updated_driver(self):
+        # Re-initializing with a different driver for an existing volume must
+        # be rejected rather than silently recreating the volume.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={vol_name: {'driver': 'local'}},
+        )
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        project.volumes.initialize()
+
+        volume_data = self.get_volume_data(full_vol_name)
+        assert volume_data['Name'].split('/')[-1] == full_vol_name
+        self.assertEqual(volume_data['Driver'], 'local')
+
+        # Same volume name, different driver: must raise.
+        config_data = config_data._replace(
+            volumes={vol_name: {'driver': 'smb'}}
+        )
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data,
+            client=self.client
+        )
+        with self.assertRaises(config.ConfigurationError) as e:
+            project.volumes.initialize()
+        assert 'Configuration for volume {0} specifies driver smb'.format(
+            vol_name
+        ) in str(e.exception)
+
+    @v2_only()
+    def test_initialize_volumes_updated_blank_driver(self):
+        # Dropping the explicit driver from an existing volume's config is
+        # not a conflict: the volume is kept with its current 'local' driver.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={vol_name: {'driver': 'local'}},
+        )
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        project.volumes.initialize()
+
+        volume_data = self.get_volume_data(full_vol_name)
+        assert volume_data['Name'].split('/')[-1] == full_vol_name
+        self.assertEqual(volume_data['Driver'], 'local')
+
+        config_data = config_data._replace(
+            volumes={vol_name: {}}
+        )
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data,
+            client=self.client
+        )
+        project.volumes.initialize()
+        volume_data = self.get_volume_data(full_vol_name)
+        assert volume_data['Name'].split('/')[-1] == full_vol_name
+        self.assertEqual(volume_data['Driver'], 'local')
+
+    @v2_only()
+    @no_cluster('inspect volume by name defect on Swarm Classic')
+    def test_initialize_volumes_external_volumes(self):
+        # External volumes are used as-is: initialize() must NOT create a
+        # project-prefixed copy of them.
+        # Use composetest_ prefix so it gets garbage-collected in tearDown()
+        vol_name = 'composetest_{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+        self.client.create_volume(vol_name)
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={
+                vol_name: {'external': True, 'name': vol_name}
+            },
+        )
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        project.volumes.initialize()
+
+        # The prefixed name must not exist; only the external volume does.
+        with self.assertRaises(NotFound):
+            self.client.inspect_volume(full_vol_name)
+
+    @v2_only()
+    def test_initialize_volumes_inexistent_external_volume(self):
+        # Declaring an external volume that does not exist on the engine must
+        # raise a ConfigurationError naming the volume.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top'
+            }],
+            volumes={
+                vol_name: {'external': True, 'name': vol_name}
+            },
+        )
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data, client=self.client
+        )
+        with self.assertRaises(config.ConfigurationError) as e:
+            project.volumes.initialize()
+        assert 'Volume {0} declared as external'.format(
+            vol_name
+        ) in str(e.exception)
+
+    @v2_only()
+    def test_project_up_named_volumes_in_binds(self):
+        # A named volume referenced in a service's `volumes` bind must resolve
+        # to the project-prefixed volume; no engine volume with the raw
+        # (unprefixed) name may be created.
+        vol_name = '{0:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{0}'.format(vol_name)
+
+        base_file = config.ConfigFile(
+            'base.yml',
+            {
+                'version': str(V2_0),
+                'services': {
+                    'simple': {
+                        'image': 'busybox:latest',
+                        'command': 'top',
+                        'volumes': ['{0}:/data'.format(vol_name)]
+                    },
+                },
+                'volumes': {
+                    vol_name: {'driver': 'local'}
+                }
+
+            })
+        config_details = config.ConfigDetails('.', [base_file])
+        config_data = config.load(config_details)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        service = project.services[0]
+        self.assertEqual(service.name, 'simple')
+        volumes = service.options.get('volumes')
+        self.assertEqual(len(volumes), 1)
+        self.assertEqual(volumes[0].external, full_vol_name)
+        project.up()
+        engine_volumes = self.client.volumes()['Volumes']
+        container = service.get_container()
+        assert [mount['Name'] for mount in container.get('Mounts')] == [full_vol_name]
+        assert next((v for v in engine_volumes if v['Name'] == vol_name), None) is None
+
+    def test_project_up_orphans(self):
+        # A container for a service that was removed from the config is an
+        # orphan: `up` warns about it and keeps it, while
+        # `up(remove_orphans=True)` removes it.
+        config_dict = {
+            'service1': {
+                'image': 'busybox:latest',
+                'command': 'top',
+            }
+        }
+
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        # Rename the service so the service1 container becomes orphaned.
+        config_dict['service2'] = config_dict['service1']
+        del config_dict['service1']
+
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        with mock.patch('compose.project.log') as mock_log:
+            project.up()
+
+        mock_log.warning.assert_called_once_with(mock.ANY)
+
+        assert len([
+            ctnr for ctnr in project._labeled_containers()
+            if ctnr.labels.get(LABEL_SERVICE) == 'service1'
+        ]) == 1
+
+        project.up(remove_orphans=True)
+
+        assert len([
+            ctnr for ctnr in project._labeled_containers()
+            if ctnr.labels.get(LABEL_SERVICE) == 'service1'
+        ]) == 0
+
+    @v2_1_only()
+    def test_project_up_healthy_dependency(self):
+        # With `condition: service_healthy`, svc2 starts once svc1's
+        # healthcheck (always exit 0) passes, so both end up running.
+        config_dict = {
+            'version': '2.1',
+            'services': {
+                'svc1': {
+                    'image': 'busybox:latest',
+                    'command': 'top',
+                    'healthcheck': {
+                        'test': 'exit 0',
+                        'retries': 1,
+                        'timeout': '10s',
+                        'interval': '1s'
+                    },
+                },
+                'svc2': {
+                    'image': 'busybox:latest',
+                    'command': 'top',
+                    'depends_on': {
+                        'svc1': {'condition': 'service_healthy'},
+                    }
+                }
+            }
+        }
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        containers = project.containers()
+        assert len(containers) == 2
+
+        svc1 = project.get_service('svc1')
+        svc2 = project.get_service('svc2')
+        assert 'svc1' in svc2.get_dependency_names()
+        assert svc1.is_healthy()
+
+    @v2_1_only()
+    def test_project_up_unhealthy_dependency(self):
+        # If svc1's healthcheck always fails (exit 1), `up` must abort with
+        # ProjectError and svc2 must never be started.
+        config_dict = {
+            'version': '2.1',
+            'services': {
+                'svc1': {
+                    'image': 'busybox:latest',
+                    'command': 'top',
+                    'healthcheck': {
+                        'test': 'exit 1',
+                        'retries': 1,
+                        'timeout': '10s',
+                        'interval': '1s'
+                    },
+                },
+                'svc2': {
+                    'image': 'busybox:latest',
+                    'command': 'top',
+                    'depends_on': {
+                        'svc1': {'condition': 'service_healthy'},
+                    }
+                }
+            }
+        }
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        with pytest.raises(ProjectError):
+            project.up()
+        # Only svc1 was started; svc2 was blocked by the failed healthcheck.
+        containers = project.containers()
+        assert len(containers) == 1
+
+        svc1 = project.get_service('svc1')
+        svc2 = project.get_service('svc2')
+        assert 'svc1' in svc2.get_dependency_names()
+        with pytest.raises(HealthCheckFailed):
+            svc1.is_healthy()
+
+    @v2_1_only()
+    def test_project_up_no_healthcheck_dependency(self):
+        # Depending on the health of a service whose healthcheck is disabled
+        # is a configuration contradiction: `up` must fail with ProjectError
+        # and is_healthy() must raise NoHealthCheckConfigured.
+        config_dict = {
+            'version': '2.1',
+            'services': {
+                'svc1': {
+                    'image': 'busybox:latest',
+                    'command': 'top',
+                    'healthcheck': {
+                        'disable': True
+                    },
+                },
+                'svc2': {
+                    'image': 'busybox:latest',
+                    'command': 'top',
+                    'depends_on': {
+                        'svc1': {'condition': 'service_healthy'},
+                    }
+                }
+            }
+        }
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        with pytest.raises(ProjectError):
+            project.up()
+        containers = project.containers()
+        assert len(containers) == 1
+
+        svc1 = project.get_service('svc1')
+        svc2 = project.get_service('svc2')
+        assert 'svc1' in svc2.get_dependency_names()
+        with pytest.raises(NoHealthCheckConfigured):
+            svc1.is_healthy()
diff --git a/tests/integration/resilience_test.py b/tests/integration/resilience_test.py
new file mode 100644
index 00000000..2a2d1b56
--- /dev/null
+++ b/tests/integration/resilience_test.py
@@ -0,0 +1,57 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .. import mock
+from .testcases import DockerClientTestCase
+from compose.config.types import VolumeSpec
+from compose.project import Project
+from compose.service import ConvergenceStrategy
+
+
+class ResilienceTest(DockerClientTestCase):
+    """A failed recreate must not lose the old container's /var/db volume."""
+
+    def setUp(self):
+        # Start a 'db' service once and remember where its /var/db volume
+        # lives on the host, so tests can check it survives recreation.
+        self.db = self.create_service(
+            'db',
+            volumes=[VolumeSpec.parse('/var/db')],
+            command='top')
+        self.project = Project('composetest', [self.db], self.client)
+
+        container = self.db.create_container()
+        self.db.start_container(container)
+        self.host_path = container.get_mount('/var/db')['Source']
+
+    def tearDown(self):
+        del self.project
+        del self.db
+        super(ResilienceTest, self).tearDown()
+
+    def test_successful_recreate(self):
+        self.project.up(strategy=ConvergenceStrategy.always)
+        container = self.db.containers()[0]
+        self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+
+    def test_create_failure(self):
+        # Simulate a crash while creating the replacement container; the
+        # next `up` must still reattach the original volume.
+        with mock.patch('compose.service.Service.create_container', crash):
+            with self.assertRaises(Crash):
+                self.project.up(strategy=ConvergenceStrategy.always)
+
+        self.project.up()
+        container = self.db.containers()[0]
+        self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+
+    def test_start_failure(self):
+        # Simulate a crash while starting the replacement container; the
+        # next `up` must still reattach the original volume.
+        with mock.patch('compose.service.Service.start_container', crash):
+            with self.assertRaises(Crash):
+                self.project.up(strategy=ConvergenceStrategy.always)
+
+        self.project.up()
+        container = self.db.containers()[0]
+        self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+
+
+class Crash(Exception):
+    """Sentinel error raised by `crash` to simulate a mid-operation failure."""
+    pass
+
+
+def crash(*args, **kwargs):
+    """Patch target standing in for Service methods; always raises Crash."""
+    raise Crash()
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
new file mode 100644
index 00000000..3ddf991b
--- /dev/null
+++ b/tests/integration/service_test.py
@@ -0,0 +1,1380 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import shutil
+import tempfile
+from distutils.spawn import find_executable
+from os import path
+
+import pytest
+from docker.errors import APIError
+from six import StringIO
+from six import text_type
+
+from .. import mock
+from .testcases import DockerClientTestCase
+from .testcases import get_links
+from .testcases import pull_busybox
+from .testcases import SWARM_SKIP_CONTAINERS_ALL
+from .testcases import SWARM_SKIP_CPU_SHARES
+from compose import __version__
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.const import LABEL_CONFIG_HASH
+from compose.const import LABEL_CONTAINER_NUMBER
+from compose.const import LABEL_ONE_OFF
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_SERVICE
+from compose.const import LABEL_VERSION
+from compose.container import Container
+from compose.errors import OperationFailedError
+from compose.project import OneOffFilter
+from compose.service import ConvergencePlan
+from compose.service import ConvergenceStrategy
+from compose.service import NetworkMode
+from compose.service import PidMode
+from compose.service import Service
+from compose.utils import parse_nanoseconds_int
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_3_only
+from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+
def create_and_start_container(service, **override_options):
    """Create a container for *service* (applying any override options) and
    start it, returning whatever ``start_container`` returns."""
    return service.start_container(service.create_container(**override_options))
+
+
+class ServiceTest(DockerClientTestCase):
+
+ def test_containers(self):
+ foo = self.create_service('foo')
+ bar = self.create_service('bar')
+
+ create_and_start_container(foo)
+
+ self.assertEqual(len(foo.containers()), 1)
+ self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
+ self.assertEqual(len(bar.containers()), 0)
+
+ create_and_start_container(bar)
+ create_and_start_container(bar)
+
+ self.assertEqual(len(foo.containers()), 1)
+ self.assertEqual(len(bar.containers()), 2)
+
+ names = [c.name for c in bar.containers()]
+ self.assertIn('composetest_bar_1', names)
+ self.assertIn('composetest_bar_2', names)
+
+ def test_containers_one_off(self):
+ db = self.create_service('db')
+ container = db.create_container(one_off=True)
+ self.assertEqual(db.containers(stopped=True), [])
+ self.assertEqual(db.containers(one_off=OneOffFilter.only, stopped=True), [container])
+
+ def test_project_is_added_to_container_name(self):
+ service = self.create_service('web')
+ create_and_start_container(service)
+ self.assertEqual(service.containers()[0].name, 'composetest_web_1')
+
+ def test_create_container_with_one_off(self):
+ db = self.create_service('db')
+ container = db.create_container(one_off=True)
+ self.assertEqual(container.name, 'composetest_db_run_1')
+
+ def test_create_container_with_one_off_when_existing_container_is_running(self):
+ db = self.create_service('db')
+ db.start()
+ container = db.create_container(one_off=True)
+ self.assertEqual(container.name, 'composetest_db_run_1')
+
+ def test_create_container_with_unspecified_volume(self):
+ service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get_mount('/var/db')
+
+ def test_create_container_with_volume_driver(self):
+ service = self.create_service('db', volume_driver='foodriver')
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual('foodriver', container.get('HostConfig.VolumeDriver'))
+
+ @pytest.mark.skipif(SWARM_SKIP_CPU_SHARES, reason='Swarm --cpu-shares bug')
+ def test_create_container_with_cpu_shares(self):
+ service = self.create_service('db', cpu_shares=73)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuShares'), 73)
+
+ def test_create_container_with_cpu_quota(self):
+ service = self.create_service('db', cpu_quota=40000)
+ container = service.create_container()
+ container.start()
+ self.assertEqual(container.get('HostConfig.CpuQuota'), 40000)
+
+ @v2_2_only()
+ def test_create_container_with_cpu_count(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpu_count=2)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuCount'), 2)
+
+ @v2_2_only()
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='cpu_percent is not supported for Linux')
+ def test_create_container_with_cpu_percent(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpu_percent=12)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuPercent'), 12)
+
+ @v2_2_only()
+ def test_create_container_with_cpus(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpus=1)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.NanoCpus'), 1000000000)
+
+ def test_create_container_with_shm_size(self):
+ self.require_api_version('1.22')
+ service = self.create_service('db', shm_size=67108864)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
+
+ def test_create_container_with_init_bool(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', init=True)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.Init') is True
+
+ @pytest.mark.xfail(True, reason='Option has been removed in Engine 17.06.0')
+ def test_create_container_with_init_path(self):
+ self.require_api_version('1.25')
+ docker_init_path = find_executable('docker-init')
+ service = self.create_service('db', init=docker_init_path)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.InitPath') == docker_init_path
+
+ @pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
+ def test_create_container_with_pids_limit(self):
+ self.require_api_version('1.23')
+ service = self.create_service('db', pids_limit=10)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.PidsLimit') == 10
+
+ def test_create_container_with_extra_hosts_list(self):
+ extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
+ service = self.create_service('db', extra_hosts=extra_hosts)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
+
+ def test_create_container_with_extra_hosts_dicts(self):
+ extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
+ extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
+ service = self.create_service('db', extra_hosts=extra_hosts)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
+
+ def test_create_container_with_cpu_set(self):
+ service = self.create_service('db', cpuset='0')
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
+
+ def test_create_container_with_read_only_root_fs(self):
+ read_only = True
+ service = self.create_service('db', read_only=read_only)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.ReadonlyRootfs') == read_only
+
    def test_create_container_with_blkio_config(self):
        """Every blkio option in the service config must surface in the
        container's HostConfig under its docker-API (CamelCase) key."""
        blkio_config = {
            'weight': 300,
            'weight_device': [{'path': '/dev/sda', 'weight': 200}],
            'device_read_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024 * 100}],
            'device_read_iops': [{'path': '/dev/sda', 'rate': 1000}],
            'device_write_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024}],
            'device_write_iops': [{'path': '/dev/sda', 'rate': 800}]
        }
        service = self.create_service('web', blkio_config=blkio_config)
        container = service.create_container()
        assert container.get('HostConfig.BlkioWeight') == 300
        assert container.get('HostConfig.BlkioWeightDevice') == [{
            'Path': '/dev/sda', 'Weight': 200
        }]
        assert container.get('HostConfig.BlkioDeviceReadBps') == [{
            'Path': '/dev/sda', 'Rate': 1024 * 1024 * 100
        }]
        assert container.get('HostConfig.BlkioDeviceWriteBps') == [{
            'Path': '/dev/sda', 'Rate': 1024 * 1024
        }]
        assert container.get('HostConfig.BlkioDeviceReadIOps') == [{
            'Path': '/dev/sda', 'Rate': 1000
        }]
        assert container.get('HostConfig.BlkioDeviceWriteIOps') == [{
            'Path': '/dev/sda', 'Rate': 800
        }]
+
+ def test_create_container_with_security_opt(self):
+ security_opt = ['label:disable']
+ service = self.create_service('db', security_opt=security_opt)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
+
+ # @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ @pytest.mark.skipif(True, reason='https://github.com/moby/moby/issues/34270')
+ def test_create_container_with_storage_opt(self):
+ storage_opt = {'size': '1G'}
+ service = self.create_service('db', storage_opt=storage_opt)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.StorageOpt'), storage_opt)
+
+ def test_create_container_with_mac_address(self):
+ service = self.create_service('db', mac_address='02:42:ac:11:65:43')
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
+
+ def test_create_container_with_specified_volume(self):
+ host_path = '/tmp/host-path'
+ container_path = '/container-path'
+
+ service = self.create_service(
+ 'db',
+ volumes=[VolumeSpec(host_path, container_path, 'rw')])
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get_mount(container_path)
+
+ # Match the last component ("host-path"), because boot2docker symlinks /tmp
+ actual_host_path = container.get_mount(container_path)['Source']
+
+ self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
+ msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
+
+ def test_create_container_with_healthcheck_config(self):
+ one_second = parse_nanoseconds_int('1s')
+ healthcheck = {
+ 'test': ['true'],
+ 'interval': 2 * one_second,
+ 'timeout': 5 * one_second,
+ 'retries': 5,
+ 'start_period': 2 * one_second
+ }
+ service = self.create_service('db', healthcheck=healthcheck)
+ container = service.create_container()
+ remote_healthcheck = container.get('Config.Healthcheck')
+ assert remote_healthcheck['Test'] == healthcheck['test']
+ assert remote_healthcheck['Interval'] == healthcheck['interval']
+ assert remote_healthcheck['Timeout'] == healthcheck['timeout']
+ assert remote_healthcheck['Retries'] == healthcheck['retries']
+ assert remote_healthcheck['StartPeriod'] == healthcheck['start_period']
+
+ def test_recreate_preserves_volume_with_trailing_slash(self):
+ """When the Compose file specifies a trailing slash in the container path, make
+ sure we copy the volume over when recreating.
+ """
+ service = self.create_service('data', volumes=[VolumeSpec.parse('/data/')])
+ old_container = create_and_start_container(service)
+ volume_path = old_container.get_mount('/data')['Source']
+
+ new_container = service.recreate_container(old_container)
+ self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
    def test_duplicate_volume_trailing_slash(self):
        """
        When an image specifies a volume, and the Compose file specifies a host path
        but adds a trailing slash, make sure that we don't create duplicate binds.
        """
        host_path = '/tmp/data'
        container_path = '/data'
        volumes = [VolumeSpec.parse('{}:{}/'.format(host_path, container_path))]

        # Build a throwaway image that declares `container_path` as a volume.
        tmp_container = self.client.create_container(
            'busybox', 'true',
            volumes={container_path: {}},
            labels={'com.docker.compose.test_image': 'true'},
            host_config={}
        )
        image = self.client.commit(tmp_container)['Id']

        service = self.create_service('db', image=image, volumes=volumes)
        old_container = create_and_start_container(service)

        self.assertEqual(
            old_container.get('Config.Volumes'),
            {container_path: {}},
        )

        service = self.create_service('db', image=image, volumes=volumes)
        new_container = service.recreate_container(old_container)

        # Still exactly one volume entry -- no duplicate bind was created.
        self.assertEqual(
            new_container.get('Config.Volumes'),
            {container_path: {}},
        )

        self.assertEqual(service.containers(stopped=False), [new_container])
+
+ def test_create_container_with_volumes_from(self):
+ volume_service = self.create_service('data')
+ volume_container_1 = volume_service.create_container()
+ volume_container_2 = Container.create(
+ self.client,
+ image='busybox:latest',
+ command=["top"],
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ environment=['affinity:container=={}'.format(volume_container_1.id)],
+ )
+ host_service = self.create_service(
+ 'host',
+ volumes_from=[
+ VolumeFromSpec(volume_service, 'rw', 'service'),
+ VolumeFromSpec(volume_container_2, 'rw', 'container')
+ ],
+ environment=['affinity:container=={}'.format(volume_container_1.id)],
+ )
+ host_container = host_service.create_container()
+ host_service.start_container(host_container)
+ self.assertIn(volume_container_1.id + ':rw',
+ host_container.get('HostConfig.VolumesFrom'))
+ self.assertIn(volume_container_2.id + ':rw',
+ host_container.get('HostConfig.VolumesFrom'))
+
    def test_execute_convergence_plan_recreate(self):
        """A 'recreate' plan replaces the container in place: same name and
        anonymous volume contents, new container id and updated environment."""
        service = self.create_service(
            'db',
            environment={'FOO': '1'},
            volumes=[VolumeSpec.parse('/etc')],
            entrypoint=['top'],
            command=['-d', '1']
        )
        old_container = service.create_container()
        self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
        self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
        self.assertIn('FOO=1', old_container.get('Config.Env'))
        self.assertEqual(old_container.name, 'composetest_db_1')
        service.start_container(old_container)
        old_container.inspect()  # reload volume data
        volume_path = old_container.get_mount('/etc')['Source']

        num_containers_before = len(self.client.containers(all=True))

        # Change the config so the convergence plan has something to apply.
        service.options['environment']['FOO'] = '2'
        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]))

        self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
        self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
        self.assertIn('FOO=2', new_container.get('Config.Env'))
        self.assertEqual(new_container.name, 'composetest_db_1')
        self.assertEqual(new_container.get_mount('/etc')['Source'], volume_path)
        if not is_cluster(self.client):
            # Single engine: the affinity marker survives into the new env.
            assert (
                'affinity:container==%s' % old_container.id in
                new_container.get('Config.Env')
            )
        else:
            # In Swarm, the env marker is consumed and the container should be deployed
            # on the same node.
            assert old_container.get('Node.Name') == new_container.get('Node.Name')

        self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
        self.assertNotEqual(old_container.id, new_container.id)
        # The old container must be gone entirely, not merely stopped.
        self.assertRaises(APIError,
                          self.client.inspect_container,
                          old_container.id)
+
    def test_execute_convergence_plan_recreate_twice(self):
        """Recreating a container twice must keep reusing the same anonymous
        volume (regression test: the second recreate used to lose it)."""
        service = self.create_service(
            'db',
            volumes=[VolumeSpec.parse('/etc')],
            entrypoint=['top'],
            command=['-d', '1'])

        orig_container = service.create_container()
        service.start_container(orig_container)

        orig_container.inspect()  # reload volume data
        volume_path = orig_container.get_mount('/etc')['Source']

        # Do this twice to reproduce the bug
        for _ in range(2):
            new_container, = service.execute_convergence_plan(
                ConvergencePlan('recreate', [orig_container]))

            assert new_container.get_mount('/etc')['Source'] == volume_path
            if not is_cluster(self.client):
                # Single engine: the affinity marker is passed through verbatim.
                assert ('affinity:container==%s' % orig_container.id in
                        new_container.get('Config.Env'))
            else:
                # In Swarm, the env marker is consumed and the container should be deployed
                # on the same node.
                assert orig_container.get('Node.Name') == new_container.get('Node.Name')

            orig_container = new_container
+
+ def test_execute_convergence_plan_when_containers_are_stopped(self):
+ service = self.create_service(
+ 'db',
+ environment={'FOO': '1'},
+ volumes=[VolumeSpec.parse('/var/db')],
+ entrypoint=['top'],
+ command=['-d', '1']
+ )
+ service.create_container()
+
+ containers = service.containers(stopped=True)
+ self.assertEqual(len(containers), 1)
+ container, = containers
+ self.assertFalse(container.is_running)
+
+ service.execute_convergence_plan(ConvergencePlan('start', [container]))
+
+ containers = service.containers()
+ self.assertEqual(len(containers), 1)
+ container.inspect()
+ self.assertEqual(container, containers[0])
+ self.assertTrue(container.is_running)
+
+ def test_execute_convergence_plan_with_image_declared_volume(self):
+ service = Service(
+ project='composetest',
+ name='db',
+ client=self.client,
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ )
+
+ old_container = create_and_start_container(service)
+ self.assertEqual(
+ [mount['Destination'] for mount in old_container.get('Mounts')], ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ self.assertEqual(
+ [mount['Destination'] for mount in new_container.get('Mounts')],
+ ['/data']
+ )
+ self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
+ def test_execute_convergence_plan_when_image_volume_masks_config(self):
+ service = self.create_service(
+ 'db',
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ )
+
+ old_container = create_and_start_container(service)
+ self.assertEqual(
+ [mount['Destination'] for mount in old_container.get('Mounts')],
+ ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')]
+
+ with mock.patch('compose.service.log') as mock_log:
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warn.mock_calls[0]
+ self.assertIn(
+ "Service \"db\" is using volume \"/data\" from the previous container",
+ args[0])
+
+ self.assertEqual(
+ [mount['Destination'] for mount in new_container.get('Mounts')],
+ ['/data']
+ )
+ self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
+ def test_execute_convergence_plan_when_host_volume_is_removed(self):
+ host_path = '/tmp/host-path'
+ service = self.create_service(
+ 'db',
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ volumes=[VolumeSpec(host_path, '/data', 'rw')])
+
+ old_container = create_and_start_container(service)
+ assert (
+ [mount['Destination'] for mount in old_container.get('Mounts')] ==
+ ['/data']
+ )
+ service.options['volumes'] = []
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ assert not mock_log.warn.called
+ assert (
+ [mount['Destination'] for mount in new_container.get('Mounts')] ==
+ ['/data']
+ )
+ assert new_container.get_mount('/data')['Source'] != host_path
+
+ def test_execute_convergence_plan_without_start(self):
+ service = self.create_service(
+ 'db',
+ build={'context': 'tests/fixtures/dockerfile-with-volume'}
+ )
+
+ containers = service.execute_convergence_plan(ConvergencePlan('create', []), start=False)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ containers = service.execute_convergence_plan(
+ ConvergencePlan('recreate', containers),
+ start=False)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ service.execute_convergence_plan(ConvergencePlan('start', containers), start=False)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ def test_start_container_passes_through_options(self):
+ db = self.create_service('db')
+ create_and_start_container(db, environment={'FOO': 'BAR'})
+ self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+
+ def test_start_container_inherits_options_from_constructor(self):
+ db = self.create_service('db', environment={'FOO': 'BAR'})
+ create_and_start_container(db)
+ self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_container_creates_links(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, None)])
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+ create_and_start_container(web)
+
+ self.assertEqual(
+ set(get_links(web.containers()[0])),
+ set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'db'])
+ )
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_container_creates_links_with_names(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, 'custom_link_name')])
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+ create_and_start_container(web)
+
+ self.assertEqual(
+ set(get_links(web.containers()[0])),
+ set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'custom_link_name'])
+ )
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_container_with_external_links(self):
+ db = self.create_service('db')
+ web = self.create_service('web', external_links=['composetest_db_1',
+ 'composetest_db_2',
+ 'composetest_db_3:db_3'])
+
+ for _ in range(3):
+ create_and_start_container(db)
+ create_and_start_container(web)
+
+ self.assertEqual(
+ set(get_links(web.containers()[0])),
+ set([
+ 'composetest_db_1',
+ 'composetest_db_2',
+ 'db_3']),
+ )
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_normal_container_does_not_create_links_to_its_own_service(self):
+ db = self.create_service('db')
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+
+ c = create_and_start_container(db)
+ self.assertEqual(set(get_links(c)), set([]))
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_one_off_container_creates_links_to_its_own_service(self):
+ db = self.create_service('db')
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+
+ c = create_and_start_container(db, one_off=OneOffFilter.only)
+
+ self.assertEqual(
+ set(get_links(c)),
+ set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'db'])
+ )
+
+ def test_start_container_builds_images(self):
+ service = Service(
+ name='test',
+ client=self.client,
+ build={'context': 'tests/fixtures/simple-dockerfile'},
+ project='composetest',
+ )
+ container = create_and_start_container(service)
+ container.wait()
+ self.assertIn(b'success', container.logs())
+ assert len(self.client.images(name='composetest_test')) >= 1
+
+ def test_start_container_uses_tagged_image_if_it_exists(self):
+ self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
+ service = Service(
+ name='test',
+ client=self.client,
+ build={'context': 'this/does/not/exist/and/will/throw/error'},
+ project='composetest',
+ )
+ container = create_and_start_container(service)
+ container.wait()
+ self.assertIn(b'success', container.logs())
+
+ def test_start_container_creates_ports(self):
+ service = self.create_service('web', ports=[8000])
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
+ self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
+
+ def test_build(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ service = self.create_service('web', build={'context': base_dir})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert self.client.inspect_image('composetest_web')
+
+ def test_build_non_ascii_filename(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f:
+ f.write("hello world\n")
+
+ service = self.create_service('web', build={'context': text_type(base_dir)})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+ assert self.client.inspect_image('composetest_web')
+
+ def test_build_with_image_name(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ image_name = 'examples/composetest:latest'
+ self.addCleanup(self.client.remove_image, image_name)
+ self.create_service('web', build={'context': base_dir}, image=image_name).build()
+ assert self.client.inspect_image(image_name)
+
+ def test_build_with_git_url(self):
+ build_url = "https://github.com/dnephin/docker-build-from-url.git"
+ service = self.create_service('buildwithurl', build={'context': build_url})
+ self.addCleanup(self.client.remove_image, service.image_name)
+ service.build()
+ assert service.image()
+
+ def test_build_with_build_args(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+ f.write("ARG build_version\n")
+ f.write("RUN echo ${build_version}\n")
+
+ service = self.create_service('buildwithargs',
+ build={'context': text_type(base_dir),
+ 'args': {"build_version": "1"}})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+ assert service.image()
+ assert "build_version=1" in service.image()['ContainerConfig']['Cmd']
+
+ def test_build_with_build_args_override(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+ f.write("ARG build_version\n")
+ f.write("RUN echo ${build_version}\n")
+
+ service = self.create_service('buildwithargs',
+ build={'context': text_type(base_dir),
+ 'args': {"build_version": "1"}})
+ service.build(build_args_override={'build_version': '2'})
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+ assert "build_version=2" in service.image()['ContainerConfig']['Cmd']
+
+ def test_build_with_build_labels(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+
+ service = self.create_service('buildlabels', build={
+ 'context': text_type(base_dir),
+ 'labels': {'com.docker.compose.test': 'true'}
+ })
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+ assert service.image()['Config']['Labels']['com.docker.compose.test'] == 'true'
+
+ @no_cluster('Container networks not on Swarm')
+ def test_build_with_network(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ f.write('RUN ping -c1 google.local\n')
+
+ net_container = self.client.create_container(
+ 'busybox', 'top', host_config=self.client.create_host_config(
+ extra_hosts={'google.local': '127.0.0.1'}
+ ), name='composetest_build_network'
+ )
+
+ self.addCleanup(self.client.remove_container, net_container, force=True)
+ self.client.start(net_container)
+
+ service = self.create_service('buildwithnet', build={
+ 'context': text_type(base_dir),
+ 'network': 'container:{}'.format(net_container['Id'])
+ })
+
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+
+ @v2_3_only()
+ @no_cluster('Not supported on UCP 2.2.0-beta1') # FIXME: remove once support is added
+ def test_build_with_target(self):
+ self.require_api_version('1.30')
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox as one\n')
+ f.write('LABEL com.docker.compose.test=true\n')
+ f.write('LABEL com.docker.compose.test.target=one\n')
+ f.write('FROM busybox as two\n')
+ f.write('LABEL com.docker.compose.test.target=two\n')
+
+ service = self.create_service('buildtarget', build={
+ 'context': text_type(base_dir),
+ 'target': 'one'
+ })
+
+ service.build()
+ assert service.image()
+ assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one'
+
+ def test_start_container_stays_unprivileged(self):
+ service = self.create_service('web')
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(container['HostConfig']['Privileged'], False)
+
+ def test_start_container_becomes_privileged(self):
+ service = self.create_service('web', privileged=True)
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(container['HostConfig']['Privileged'], True)
+
+ def test_expose_does_not_publish_ports(self):
+ service = self.create_service('web', expose=["8000"])
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})
+
+ def test_start_container_creates_port_with_explicit_protocol(self):
+ service = self.create_service('web', ports=['8000/udp'])
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])
+
+ def test_start_container_creates_fixed_external_ports(self):
+ service = self.create_service('web', ports=['8000:8000'])
+ container = create_and_start_container(service).inspect()
+ self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
+ self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
+
+ def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
+ service = self.create_service('web', ports=['8001:8000'])
+ container = create_and_start_container(service).inspect()
+ self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
+ self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')
+
+ def test_port_with_explicit_interface(self):
+ service = self.create_service('web', ports=[
+ '127.0.0.1:8001:8000',
+ '0.0.0.0:9001:9000/udp',
+ ])
+ container = create_and_start_container(service).inspect()
+ assert container['NetworkSettings']['Ports']['8000/tcp'] == [{
+ 'HostIp': '127.0.0.1',
+ 'HostPort': '8001',
+ }]
+ assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostPort'] == '9001'
+ if not is_cluster(self.client):
+ assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostIp'] == '0.0.0.0'
+ # self.assertEqual(container['NetworkSettings']['Ports'], {
+ # '8000/tcp': [
+ # {
+ # 'HostIp': '127.0.0.1',
+ # 'HostPort': '8001',
+ # },
+ # ],
+ # '9000/udp': [
+ # {
+ # 'HostIp': '0.0.0.0',
+ # 'HostPort': '9001',
+ # },
+ # ],
+ # })
+
+ def test_create_with_image_id(self):
+ # Get image id for the current busybox:latest
+ pull_busybox(self.client)
+ image_id = self.client.inspect_image('busybox:latest')['Id'][:12]
+ service = self.create_service('foo', image=image_id)
+ service.create_container()
+
    def test_scale(self):
        """Scaling up and down adjusts the container count accordingly.

        Also verifies that scaled containers run detached, with no stdio
        streams attached.
        """
        service = self.create_service('web')
        service.scale(1)
        self.assertEqual(len(service.containers()), 1)

        # Ensure containers don't have stdout or stdin connected
        container = service.containers()[0]
        config = container.inspect()['Config']
        self.assertFalse(config['AttachStderr'])
        self.assertFalse(config['AttachStdout'])
        self.assertFalse(config['AttachStdin'])

        service.scale(3)
        self.assertEqual(len(service.containers()), 3)
        service.scale(1)
        self.assertEqual(len(service.containers()), 1)
        service.scale(0)
        self.assertEqual(len(service.containers()), 0)
+
    @pytest.mark.skipif(
        SWARM_SKIP_CONTAINERS_ALL,
        reason='Swarm /containers/json bug'
    )
    def test_scale_with_stopped_containers(self):
        """
        Given there are some stopped containers and scale is called with a
        desired number that is the same as the number of stopped containers,
        test that those containers are restarted and not removed/recreated.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        valid_numbers = [next_number, next_number + 1]
        service.create_container(number=next_number)
        service.create_container(number=next_number + 1)

        # Capture stderr to inspect the progress messages emitted by scale().
        with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
            service.scale(2)
        for container in service.containers():
            self.assertTrue(container.is_running)
            self.assertTrue(container.number in valid_numbers)

        # Existing containers must be started, never recreated.
        captured_output = mock_stderr.getvalue()
        self.assertNotIn('Creating', captured_output)
        self.assertIn('Starting', captured_output)
+
    def test_scale_with_stopped_containers_and_needing_creation(self):
        """
        Given there are some stopped containers and scale is called with a
        desired number that is greater than the number of stopped containers,
        test that those containers are restarted and required number are created.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        service.create_container(number=next_number, quiet=True)

        # Precondition: nothing is running yet.
        for container in service.containers():
            self.assertFalse(container.is_running)

        with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
            service.scale(2)

        self.assertEqual(len(service.containers()), 2)
        for container in service.containers():
            self.assertTrue(container.is_running)

        # One container was started, one had to be created.
        captured_output = mock_stderr.getvalue()
        self.assertIn('Creating', captured_output)
        self.assertIn('Starting', captured_output)
+
    def test_scale_with_api_error(self):
        """Test that when scaling if the API returns an error, that error is handled
        and the remaining threads continue.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        service.create_container(number=next_number, quiet=True)

        # Force every further Container.create to fail with an APIError.
        with mock.patch(
            'compose.container.Container.create',
            side_effect=APIError(
                message="testing",
                response={},
                explanation="Boom")):
            with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
                with pytest.raises(OperationFailedError):
                    service.scale(3)

        # The pre-existing container survives and the failure is reported.
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running
        assert (
            "ERROR: for composetest_web_2 Cannot create container for service"
            " web: Boom" in mock_stderr.getvalue()
        )
+
    def test_scale_with_unexpected_exception(self):
        """Test that when scaling if the API returns an error, that is not of type
        APIError, that error is re-raised.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        service.create_container(number=next_number, quiet=True)

        # Non-APIError exceptions must propagate out of scale() unchanged.
        with mock.patch(
            'compose.container.Container.create',
            side_effect=ValueError("BOOM")
        ):
            with self.assertRaises(ValueError):
                service.scale(3)

        self.assertEqual(len(service.containers()), 1)
        self.assertTrue(service.containers()[0].is_running)
+
    @mock.patch('compose.service.log')
    def test_scale_with_desired_number_already_achieved(self, mock_log):
        """
        Test that calling scale with a desired number that is equal to the
        number of containers already running results in no change.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        container = service.create_container(number=next_number, quiet=True)
        container.start()

        container.inspect()
        assert container.is_running
        assert len(service.containers()) == 1

        service.scale(1)
        assert len(service.containers()) == 1
        container.inspect()
        assert container.is_running

        # The only visible effect should be the informational log message.
        captured_output = mock_log.info.call_args[0]
        assert 'Desired container number already achieved' in captured_output
+
    @mock.patch('compose.service.log')
    def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
        """Test that calling scale on a service that has a custom container name
        results in warning output.
        """
        service = self.create_service('app', container_name='custom-container')
        self.assertEqual(service.custom_container_name, 'custom-container')

        # Scaling a fixed-name service cannot work and must fail loudly.
        with pytest.raises(OperationFailedError):
            service.scale(3)

        captured_output = mock_log.warn.call_args[0][0]

        self.assertEqual(len(service.containers()), 1)
        self.assertIn(
            "Remove the custom name to scale the service.",
            captured_output
        )
+
+ def test_scale_sets_ports(self):
+ service = self.create_service('web', ports=['8000'])
+ service.scale(2)
+ containers = service.containers()
+ self.assertEqual(len(containers), 2)
+ for container in containers:
+ self.assertEqual(
+ list(container.get('HostConfig.PortBindings')),
+ ['8000/tcp'])
+
    def test_scale_with_immediate_exit(self):
        # Containers that exit immediately must still count toward the
        # requested scale (hence stopped=True when listing).
        service = self.create_service('web', image='busybox', command='true')
        service.scale(2)
        assert len(service.containers(stopped=True)) == 2
+
+ def test_network_mode_none(self):
+ service = self.create_service('web', network_mode=NetworkMode('none'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')
+
+ def test_network_mode_bridged(self):
+ service = self.create_service('web', network_mode=NetworkMode('bridge'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')
+
+ def test_network_mode_host(self):
+ service = self.create_service('web', network_mode=NetworkMode('host'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
+
+ def test_pid_mode_none_defined(self):
+ service = self.create_service('web', pid_mode=None)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.PidMode'), '')
+
+ def test_pid_mode_host(self):
+ service = self.create_service('web', pid_mode=PidMode('host'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.PidMode'), 'host')
+
+ @v2_1_only()
+ def test_userns_mode_none_defined(self):
+ service = self.create_service('web', userns_mode=None)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.UsernsMode'), '')
+
+ @v2_1_only()
+ def test_userns_mode_host(self):
+ service = self.create_service('web', userns_mode='host')
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.UsernsMode'), 'host')
+
+ def test_dns_no_value(self):
+ service = self.create_service('web')
+ container = create_and_start_container(service)
+ self.assertIsNone(container.get('HostConfig.Dns'))
+
+ def test_dns_list(self):
+ service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
+
+ def test_mem_swappiness(self):
+ service = self.create_service('web', mem_swappiness=11)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.MemorySwappiness'), 11)
+
+ def test_mem_reservation(self):
+ service = self.create_service('web', mem_reservation='20m')
+ container = create_and_start_container(service)
+ assert container.get('HostConfig.MemoryReservation') == 20 * 1024 * 1024
+
+ def test_restart_always_value(self):
+ service = self.create_service('web', restart={'Name': 'always'})
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
+
+ def test_oom_score_adj_value(self):
+ service = self.create_service('web', oom_score_adj=500)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.OomScoreAdj'), 500)
+
+ def test_group_add_value(self):
+ service = self.create_service('web', group_add=["root", "1"])
+ container = create_and_start_container(service)
+
+ host_container_groupadd = container.get('HostConfig.GroupAdd')
+ assert "root" in host_container_groupadd
+ assert "1" in host_container_groupadd
+
+ def test_dns_opt_value(self):
+ service = self.create_service('web', dns_opt=["use-vc", "no-tld-query"])
+ container = create_and_start_container(service)
+
+ dns_opt = container.get('HostConfig.DnsOptions')
+ assert 'use-vc' in dns_opt
+ assert 'no-tld-query' in dns_opt
+
+ def test_restart_on_failure_value(self):
+ service = self.create_service('web', restart={
+ 'Name': 'on-failure',
+ 'MaximumRetryCount': 5
+ })
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
+ self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)
+
+ def test_cap_add_list(self):
+ service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.CapAdd'), ['SYS_ADMIN', 'NET_ADMIN'])
+
+ def test_cap_drop_list(self):
+ service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])
+
+ def test_dns_search(self):
+ service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])
+
+ @v2_only()
+ def test_tmpfs(self):
+ service = self.create_service('web', tmpfs=['/run'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.Tmpfs'), {'/run': ''})
+
+ def test_working_dir_param(self):
+ service = self.create_service('container', working_dir='/working/dir/sample')
+ container = service.create_container()
+ self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample')
+
+ def test_split_env(self):
+ service = self.create_service(
+ 'web',
+ environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
+ env = create_and_start_container(service).environment
+ for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
+ self.assertEqual(env[k], v)
+
+ def test_env_from_file_combined_with_env(self):
+ service = self.create_service(
+ 'web',
+ environment=['ONE=1', 'TWO=2', 'THREE=3'],
+ env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
+ env = create_and_start_container(service).environment
+ for k, v in {
+ 'ONE': '1',
+ 'TWO': '2',
+ 'THREE': '3',
+ 'FOO': 'baz',
+ 'DOO': 'dah'
+ }.items():
+ self.assertEqual(env[k], v)
+
    @v3_only()
    def test_build_with_cachefrom(self):
        """A cache_from entry must be accepted by the builder."""
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)

        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")

        # 'build1' need not exist; the option only has to be passed through
        # to the engine without breaking the build.
        service = self.create_service('cache_from',
                                      build={'context': base_dir,
                                             'cache_from': ['build1']})
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)

        assert service.image()
+
    @mock.patch.dict(os.environ)
    def test_resolve_env(self):
        """Environment resolution precedence.

        An explicit value in the service config wins; a key declared with
        None is resolved from os.environ; a key absent from os.environ
        resolves to None.
        """
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'
        service = self.create_service(
            'web',
            environment={
                'FILE_DEF': 'F1',
                'FILE_DEF_EMPTY': '',
                'ENV_DEF': None,
                'NO_DEF': None
            }
        )
        env = create_and_start_container(service).environment
        for k, v in {
            'FILE_DEF': 'F1',
            'FILE_DEF_EMPTY': '',
            'ENV_DEF': 'E3',
            'NO_DEF': None
        }.items():
            self.assertEqual(env[k], v)
+
+ def test_with_high_enough_api_version_we_get_default_network_mode(self):
+ # TODO: remove this test once minimum docker version is 1.8.x
+ with mock.patch.object(self.client, '_version', '1.20'):
+ service = self.create_service('web')
+ service_config = service._get_container_host_config({})
+ self.assertEqual(service_config['NetworkMode'], 'default')
+
+ def test_labels(self):
+ labels_dict = {
+ 'com.example.description': "Accounting webapp",
+ 'com.example.department': "Finance",
+ 'com.example.label-with-empty-value': "",
+ }
+
+ compose_labels = {
+ LABEL_CONTAINER_NUMBER: '1',
+ LABEL_ONE_OFF: 'False',
+ LABEL_PROJECT: 'composetest',
+ LABEL_SERVICE: 'web',
+ LABEL_VERSION: __version__,
+ }
+ expected = dict(labels_dict, **compose_labels)
+
+ service = self.create_service('web', labels=labels_dict)
+ labels = create_and_start_container(service).labels.items()
+ for pair in expected.items():
+ self.assertIn(pair, labels)
+
+ def test_empty_labels(self):
+ labels_dict = {'foo': '', 'bar': ''}
+ service = self.create_service('web', labels=labels_dict)
+ labels = create_and_start_container(service).labels.items()
+ for name in labels_dict:
+ self.assertIn((name, ''), labels)
+
+ def test_stop_signal(self):
+ stop_signal = 'SIGINT'
+ service = self.create_service('web', stop_signal=stop_signal)
+ container = create_and_start_container(service)
+ self.assertEqual(container.stop_signal, stop_signal)
+
+ def test_custom_container_name(self):
+ service = self.create_service('web', container_name='my-web-container')
+ self.assertEqual(service.custom_container_name, 'my-web-container')
+
+ container = create_and_start_container(service)
+ self.assertEqual(container.name, 'my-web-container')
+
+ one_off_container = service.create_container(one_off=True)
+ self.assertNotEqual(one_off_container.name, 'my-web-container')
+
    @pytest.mark.skipif(True, reason="Broken on 1.11.0 - 17.03.0")
    def test_log_drive_invalid(self):
        """An unknown log driver should be rejected by the engine."""
        service = self.create_service('web', logging={'driver': 'xxx'})
        expected_error_msg = "logger: no log driver named 'xxx' is registered"

        with self.assertRaisesRegexp(APIError, expected_error_msg):
            create_and_start_container(service)
+
+ def test_log_drive_empty_default_jsonfile(self):
+ service = self.create_service('web')
+ log_config = create_and_start_container(service).log_config
+
+ self.assertEqual('json-file', log_config['Type'])
+ self.assertFalse(log_config['Config'])
+
+ def test_log_drive_none(self):
+ service = self.create_service('web', logging={'driver': 'none'})
+ log_config = create_and_start_container(service).log_config
+
+ self.assertEqual('none', log_config['Type'])
+ self.assertFalse(log_config['Config'])
+
+ def test_devices(self):
+ service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
+ device_config = create_and_start_container(service).get('HostConfig.Devices')
+
+ device_dict = {
+ 'PathOnHost': '/dev/random',
+ 'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/mapped-random'
+ }
+
+ self.assertEqual(1, len(device_config))
+ self.assertDictEqual(device_dict, device_config[0])
+
    def test_duplicate_containers(self):
        """Two containers sharing a service number are flagged as duplicates."""
        service = self.create_service('web')

        options = service._get_container_create_options({}, 1)
        original = Container.create(service.client, **options)

        self.assertEqual(set(service.containers(stopped=True)), set([original]))
        self.assertEqual(set(service.duplicate_containers()), set())

        # Same container number, different name -> a duplicate.
        options['name'] = 'temporary_container_name'
        duplicate = Container.create(service.client, **options)

        self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
        self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
+
+
def converge(service, strategy=ConvergenceStrategy.changed):
    """Compute a convergence plan for *service* and immediately execute it."""
    return service.execute_convergence_plan(
        service.convergence_plan(strategy), timeout=1)
+
+
class ConfigHashTest(DockerClientTestCase):
    """Checks when the config-hash label is (and is not) applied."""

    def test_no_config_hash_when_one_off(self):
        web = self.create_service('web')
        container = web.create_container(one_off=True)
        self.assertNotIn(LABEL_CONFIG_HASH, container.labels)

    def test_no_config_hash_when_overriding_options(self):
        web = self.create_service('web')
        container = web.create_container(environment={'FOO': '1'})
        self.assertNotIn(LABEL_CONFIG_HASH, container.labels)

    def test_config_hash_with_custom_labels(self):
        web = self.create_service('web', labels={'foo': '1'})
        container = converge(web)[0]
        self.assertIn(LABEL_CONFIG_HASH, container.labels)
        self.assertIn('foo', container.labels)

    def test_config_hash_sticks_around(self):
        # The hash label must survive a recreate caused by a config change.
        web = self.create_service('web', command=["top"])
        container = converge(web)[0]
        self.assertIn(LABEL_CONFIG_HASH, container.labels)

        web = self.create_service('web', command=["top", "-d", "1"])
        container = converge(web)[0]
        self.assertIn(LABEL_CONFIG_HASH, container.labels)
diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py
new file mode 100644
index 00000000..047dc704
--- /dev/null
+++ b/tests/integration/state_test.py
@@ -0,0 +1,308 @@
+"""
+Integration tests which cover state convergence (aka smart recreate) performed
+by `docker-compose up`.
+"""
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import py
+from docker.errors import ImageNotFound
+
+from .testcases import DockerClientTestCase
+from .testcases import get_links
+from .testcases import no_cluster
+from compose.config import config
+from compose.project import Project
+from compose.service import ConvergenceStrategy
+
+
class ProjectTestCase(DockerClientTestCase):
    """Base class: build a throwaway project from an in-memory config dict."""

    def run_up(self, cfg, **kwargs):
        # Run `up` on a project built from *cfg* and return all of its
        # containers (including stopped ones) as a set.
        kwargs.setdefault('timeout', 1)
        kwargs.setdefault('detached', True)

        project = self.make_project(cfg)
        project.up(**kwargs)
        return set(project.containers(stopped=True))

    def make_project(self, cfg):
        # Wrap the plain dict *cfg* into a loaded Project named 'composetest'.
        details = config.ConfigDetails(
            'working_dir',
            [config.ConfigFile(None, cfg)])
        return Project.from_config(
            name='composetest',
            client=self.client,
            config_data=config.load(details))
+
+
class BasicProjectTest(ProjectTestCase):
    """Convergence behaviour for a flat two-service project."""

    def setUp(self):
        super(BasicProjectTest, self).setUp()

        self.cfg = {
            'db': {'image': 'busybox:latest', 'command': 'top'},
            'web': {'image': 'busybox:latest', 'command': 'top'},
        }

    def test_no_change(self):
        # Re-running up with an identical config must reuse both containers.
        old_containers = self.run_up(self.cfg)
        self.assertEqual(len(old_containers), 2)

        new_containers = self.run_up(self.cfg)
        self.assertEqual(len(new_containers), 2)

        self.assertEqual(old_containers, new_containers)

    def test_partial_change(self):
        # Only the changed service (web) is recreated; db is preserved.
        old_containers = self.run_up(self.cfg)
        old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
        old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]

        self.cfg['web']['command'] = '/bin/true'

        new_containers = self.run_up(self.cfg)
        self.assertEqual(len(new_containers), 2)

        preserved = list(old_containers & new_containers)
        self.assertEqual(preserved, [old_db])

        removed = list(old_containers - new_containers)
        self.assertEqual(removed, [old_web])

        created = list(new_containers - old_containers)
        self.assertEqual(len(created), 1)
        self.assertEqual(created[0].name_without_project, 'web_1')
        self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])

    def test_all_change(self):
        # Changing every service recreates every container.
        old_containers = self.run_up(self.cfg)
        self.assertEqual(len(old_containers), 2)

        self.cfg['web']['command'] = '/bin/true'
        self.cfg['db']['command'] = '/bin/true'

        new_containers = self.run_up(self.cfg)
        self.assertEqual(len(new_containers), 2)

        unchanged = old_containers & new_containers
        self.assertEqual(len(unchanged), 0)

        new = new_containers - old_containers
        self.assertEqual(len(new), 2)
+
+
class ProjectWithDependenciesTest(ProjectTestCase):
    """Convergence for a linked chain: db <- web <- nginx."""

    def setUp(self):
        super(ProjectWithDependenciesTest, self).setUp()

        self.cfg = {
            'db': {
                'image': 'busybox:latest',
                'command': 'tail -f /dev/null',
            },
            'web': {
                'image': 'busybox:latest',
                'command': 'tail -f /dev/null',
                'links': ['db'],
            },
            'nginx': {
                'image': 'busybox:latest',
                'command': 'tail -f /dev/null',
                'links': ['web'],
            },
        }

    def test_up(self):
        containers = self.run_up(self.cfg)
        self.assertEqual(
            set(c.name_without_project for c in containers),
            set(['db_1', 'web_1', 'nginx_1']),
        )

    def test_change_leaf(self):
        # Changing the leaf (nginx) recreates only the leaf.
        old_containers = self.run_up(self.cfg)

        self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
        new_containers = self.run_up(self.cfg)

        self.assertEqual(
            set(c.name_without_project for c in new_containers - old_containers),
            set(['nginx_1']),
        )

    def test_change_middle(self):
        # Changing web cascades to its dependent (nginx) but not to db.
        old_containers = self.run_up(self.cfg)

        self.cfg['web']['environment'] = {'NEW_VAR': '1'}
        new_containers = self.run_up(self.cfg)

        self.assertEqual(
            set(c.name_without_project for c in new_containers - old_containers),
            set(['web_1', 'nginx_1']),
        )

    def test_change_root(self):
        # Changing the root (db) cascades to the whole chain.
        old_containers = self.run_up(self.cfg)

        self.cfg['db']['environment'] = {'NEW_VAR': '1'}
        new_containers = self.run_up(self.cfg)

        self.assertEqual(
            set(c.name_without_project for c in new_containers - old_containers),
            set(['db_1', 'web_1', 'nginx_1']),
        )

    def test_change_root_no_recreate(self):
        # With strategy=never, even a changed root must not be recreated.
        old_containers = self.run_up(self.cfg)

        self.cfg['db']['environment'] = {'NEW_VAR': '1'}
        new_containers = self.run_up(
            self.cfg,
            strategy=ConvergenceStrategy.never)

        self.assertEqual(new_containers - old_containers, set())

    def test_service_removed_while_down(self):
        # A service dropped from the config while the project is stopped
        # simply disappears on the next up.
        next_cfg = {
            'web': {
                'image': 'busybox:latest',
                'command': 'tail -f /dev/null',
            },
            'nginx': self.cfg['nginx'],
        }

        containers = self.run_up(self.cfg)
        self.assertEqual(len(containers), 3)

        project = self.make_project(self.cfg)
        project.stop(timeout=1)

        containers = self.run_up(next_cfg)
        self.assertEqual(len(containers), 2)

    def test_service_recreated_when_dependency_created(self):
        # Starting web alone, then the full project, must re-link web to db
        # and nginx to web.
        containers = self.run_up(self.cfg, service_names=['web'], start_deps=False)
        self.assertEqual(len(containers), 1)

        containers = self.run_up(self.cfg)
        self.assertEqual(len(containers), 3)

        web, = [c for c in containers if c.service == 'web']
        nginx, = [c for c in containers if c.service == 'nginx']

        self.assertEqual(set(get_links(web)), {'composetest_db_1', 'db', 'db_1'})
        self.assertEqual(set(get_links(nginx)), {'composetest_web_1', 'web', 'web_1'})
+
+
class ServiceStateTest(DockerClientTestCase):
    """Test cases for Service.convergence_plan."""

    def test_trigger_create(self):
        # No containers at all -> plan is ('create', []).
        web = self.create_service('web')
        self.assertEqual(('create', []), web.convergence_plan())

    def test_trigger_noop(self):
        # A running, unchanged container -> nothing to do.
        web = self.create_service('web')
        container = web.create_container()
        web.start()

        web = self.create_service('web')
        self.assertEqual(('noop', [container]), web.convergence_plan())

    def test_trigger_start(self):
        # One stopped replica out of two -> only the stopped one is started.
        options = dict(command=["top"])

        web = self.create_service('web', **options)
        web.scale(2)

        containers = web.containers(stopped=True)
        containers[0].stop()
        containers[0].inspect()

        self.assertEqual([c.is_running for c in containers], [False, True])

        self.assertEqual(
            ('start', containers[0:1]),
            web.convergence_plan(),
        )

    def test_trigger_recreate_with_config_change(self):
        web = self.create_service('web', command=["top"])
        container = web.create_container()

        web = self.create_service('web', command=["top", "-d", "1"])
        self.assertEqual(('recreate', [container]), web.convergence_plan())

    def test_trigger_recreate_with_nonexistent_image_tag(self):
        web = self.create_service('web', image="busybox:latest")
        container = web.create_container()

        web = self.create_service('web', image="nonexistent-image")
        self.assertEqual(('recreate', [container]), web.convergence_plan())

    def test_trigger_recreate_with_image_change(self):
        repo = 'composetest_myimage'
        tag = 'latest'
        image = '{}:{}'.format(repo, tag)

        def safe_remove_image(image):
            # Cleanup helper: ignore the image already being gone.
            try:
                self.client.remove_image(image)
            except ImageNotFound:
                pass

        image_id = self.client.images(name='busybox')[0]['Id']
        self.client.tag(image_id, repository=repo, tag=tag)
        self.addCleanup(safe_remove_image, image)

        web = self.create_service('web', image=image)
        container = web.create_container()

        # update the image
        c = self.client.create_container(image, ['touch', '/hello.txt'], host_config={})

        # In the case of a cluster, there's a chance we pick up the old image when
        # calculating the new hash. To circumvent that, untag the old image first
        # See also: https://github.com/moby/moby/issues/26852
        self.client.remove_image(image, force=True)

        self.client.commit(c, repository=repo, tag=tag)
        self.client.remove_container(c)

        web = self.create_service('web', image=image)
        self.assertEqual(('recreate', [container]), web.convergence_plan())

    @no_cluster('Can not guarantee the build will be run on the same node the service is deployed')
    def test_trigger_recreate_with_build(self):
        # Rebuilding from a changed Dockerfile must trigger a recreate.
        context = py.test.ensuretemp('test_trigger_recreate_with_build')
        self.addCleanup(context.remove)

        base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n"
        dockerfile = context.join('Dockerfile')
        dockerfile.write(base_image)

        web = self.create_service('web', build={'context': str(context)})
        container = web.create_container()

        dockerfile.write(base_image + 'CMD echo hello world\n')
        web.build()

        web = self.create_service('web', build={'context': str(context)})
        self.assertEqual(('recreate', [container]), web.convergence_plan())

    def test_image_changed_to_build(self):
        # Switching a service from `image:` to `build:` forces a recreate.
        context = py.test.ensuretemp('test_image_changed_to_build')
        self.addCleanup(context.remove)
        context.join('Dockerfile').write("""
            FROM busybox
            LABEL com.docker.compose.test_image=true
        """)

        web = self.create_service('web', image='busybox')
        container = web.create_container()

        web = self.create_service('web', build={'context': str(context)})
        plan = web.convergence_plan()
        self.assertEqual(('recreate', [container]), plan)
        containers = web.execute_convergence_plan(plan)
        self.assertEqual(len(containers), 1)
diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py
new file mode 100644
index 00000000..b72fb53a
--- /dev/null
+++ b/tests/integration/testcases.py
@@ -0,0 +1,187 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import functools
+import os
+
+import pytest
+from docker.errors import APIError
+from docker.utils import version_lt
+
+from .. import unittest
+from compose.cli.docker_client import docker_client
+from compose.config.config import resolve_environment
+from compose.config.environment import Environment
+from compose.const import API_VERSIONS
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import LABEL_PROJECT
+from compose.progress_stream import stream_output
+from compose.service import Service
+
# Environment toggles for known Swarm-classic quirks; any value other than
# '0' (or unset) enables the corresponding skip/assumption.
SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0'
SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0'
SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0'
SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
+
+
def pull_busybox(client):
    # Ensure busybox:latest is available locally before tests depend on it.
    client.pull('busybox:latest', stream=False)
+
+
def get_links(container):
    """Return the bare alias names of every link set on *container*.

    HostConfig.Links entries look like "source_name:/dest_name/alias";
    only the trailing alias component is returned.
    """
    raw_links = container.get('HostConfig.Links') or []

    def alias_of(raw):
        _, alias = raw.split(':')
        return alias.split('/')[-1]

    return [alias_of(raw) for raw in raw_links]
+
+
def engine_max_version():
    """Return the newest compose-file version the engine under test supports.

    Driven by the DOCKER_VERSION environment variable; without it, assume
    the newest supported format.
    """
    if 'DOCKER_VERSION' not in os.environ:
        return V3_3
    version = os.environ['DOCKER_VERSION'].partition('-')[0]
    # (exclusive upper bound, supported compose version), oldest first.
    version_ladder = [
        ('1.10', V1),
        ('1.12', V2_0),
        ('1.13', V2_1),
        ('17.06', V3_2),
    ]
    for upper_bound, compose_version in version_ladder:
        if version_lt(version, upper_bound):
            return compose_version
    return V3_3
+
+
def min_version_skip(version):
    """Build a skipif marker for engines below compose-file *version*."""
    engine_too_old = engine_max_version() < version
    return pytest.mark.skipif(
        engine_too_old,
        reason="Engine version {} is too low".format(version)
    )
+
+
def v2_only():
    """Skip marker: requires compose file format >= 2.0 support."""
    return min_version_skip(V2_0)
+
+
def v2_1_only():
    """Skip marker: requires compose file format >= 2.1 support."""
    return min_version_skip(V2_1)
+
+
def v2_2_only():
    """Skip marker: requires compose file format >= 2.2 support."""
    return min_version_skip(V2_2)
+
+
def v2_3_only():
    """Skip marker: requires compose file format >= 2.3 support."""
    return min_version_skip(V2_3)
+
+
def v3_only():
    """Skip marker: requires compose file format >= 3.0 support."""
    return min_version_skip(V3_0)
+
+
class DockerClientTestCase(unittest.TestCase):
    """Base class for tests that talk to a real Docker engine."""

    @classmethod
    def setUpClass(cls):
        # Pin the client to the newest API version the engine supports.
        version = API_VERSIONS[engine_max_version()]
        cls.client = docker_client(Environment(), version)

    @classmethod
    def tearDownClass(cls):
        del cls.client

    def tearDown(self):
        # Remove every container, test image, volume and network that
        # belongs to the 'composetest' project.
        for c in self.client.containers(
                all=True,
                filters={'label': '%s=composetest' % LABEL_PROJECT}):
            self.client.remove_container(c['Id'], force=True)

        for i in self.client.images(
                filters={'label': 'com.docker.compose.test_image'}):
            try:
                self.client.remove_image(i, force=True)
            except APIError as e:
                # Server-side failures are tolerated; the image may be in use.
                if e.is_server_error():
                    pass

        volumes = self.client.volumes().get('Volumes') or []
        for v in volumes:
            if 'composetest_' in v['Name']:
                self.client.remove_volume(v['Name'])

        networks = self.client.networks()
        for n in networks:
            if 'composetest_' in n['Name']:
                self.client.remove_network(n['Name'])

    def create_service(self, name, **kwargs):
        """Build a Service named *name* in the 'composetest' project.

        Defaults to the busybox:latest image with a long-running `top`
        command so the container stays up.
        """
        if 'image' not in kwargs and 'build' not in kwargs:
            kwargs['image'] = 'busybox:latest'

        if 'command' not in kwargs:
            kwargs['command'] = ["top"]

        kwargs['environment'] = resolve_environment(
            kwargs, Environment.from_env_file(None)
        )
        labels = dict(kwargs.setdefault('labels', {}))
        # NOTE(review): `labels` is a copy — the test-name entry below is
        # never written back into kwargs, so it does not reach the Service.
        # Confirm whether this is intentional.
        labels['com.docker.compose.test-name'] = self.id()

        return Service(name, client=self.client, project='composetest', **kwargs)

    def check_build(self, *args, **kwargs):
        # Run a build and drain its output stream so build errors surface.
        kwargs.setdefault('rm', True)
        build_output = self.client.build(*args, **kwargs)
        stream_output(build_output, open('/dev/null', 'w'))

    def require_api_version(self, minimum):
        # Skip (rather than fail) when the daemon is too old for the feature.
        api_version = self.client.version()['ApiVersion']
        if version_lt(api_version, minimum):
            pytest.skip("API version is too low ({} < {})".format(api_version, minimum))

    def get_volume_data(self, volume_name):
        # On Swarm classic, inspect-by-name is unreliable; resolve the name
        # through the volume list first.
        if not is_cluster(self.client):
            return self.client.inspect_volume(volume_name)

        volumes = self.client.volumes(filters={'name': volume_name})['Volumes']
        assert len(volumes) > 0
        return self.client.inspect_volume(volumes[0]['Name'])
+
+
def is_cluster(client):
    """Best-effort check for a multi-node engine.

    The node count is cached as an attribute on the function object, so the
    API is queried at most once per process.
    """
    if SWARM_ASSUME_MULTINODE:
        # Environment override: pretend we are on a multi-node cluster.
        return True

    def get_nodes_number():
        try:
            return len(client.nodes())
        except APIError:
            # If the Engine is not part of a Swarm, the SDK will raise
            # an APIError
            return 0

    if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
        # Only make the API call if the value hasn't been cached yet
        is_cluster.nodes = get_nodes_number()

    return is_cluster.nodes > 1
+
+
def no_cluster(reason):
    """Decorator factory: skip the wrapped test on a multi-node cluster."""
    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(self, *args, **kwargs):
            if is_cluster(self.client):
                pytest.skip("Test will not be run in cluster mode: %s" % reason)
                return
            return test_func(self, *args, **kwargs)
        return wrapper

    return decorator
diff --git a/tests/integration/volume_test.py b/tests/integration/volume_test.py
new file mode 100644
index 00000000..2a521d4c
--- /dev/null
+++ b/tests/integration/volume_test.py
@@ -0,0 +1,126 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import six
+from docker.errors import DockerException
+
+from .testcases import DockerClientTestCase
+from .testcases import no_cluster
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_VOLUME
+from compose.volume import Volume
+
+
class VolumeTest(DockerClientTestCase):
    """Integration tests for compose.volume.Volume against a live daemon."""

    def setUp(self):
        # Volumes created during a test; tearDown removes them.
        self.tmp_volumes = []

    def tearDown(self):
        # Best-effort cleanup: a volume may already be gone or in use.
        for volume in self.tmp_volumes:
            try:
                self.client.remove_volume(volume.full_name)
            except DockerException:
                pass
        del self.tmp_volumes
        super(VolumeTest, self).tearDown()

    def create_volume(self, name, driver=None, opts=None, external=None, custom_name=False):
        """Build a Volume registered for cleanup; does not create it yet.

        *external* may be True (use *name* as-is) or a string (an alias
        that replaces *name*); either implies custom_name.
        """
        if external:
            custom_name = True
            if isinstance(external, six.text_type):
                name = external

        vol = Volume(
            self.client, 'composetest', name, driver=driver, driver_opts=opts,
            external=bool(external), custom_name=custom_name
        )
        self.tmp_volumes.append(vol)
        return vol

    def test_create_volume(self):
        vol = self.create_volume('volume01')
        vol.create()
        info = self.get_volume_data(vol.full_name)
        # split('/') strips any cluster node-name prefix.
        assert info['Name'].split('/')[-1] == vol.full_name

    def test_create_volume_custom_name(self):
        vol = self.create_volume('volume01', custom_name=True)
        assert vol.name == vol.full_name
        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.name

    def test_recreate_existing_volume(self):
        # Calling create() twice must be idempotent.
        vol = self.create_volume('volume01')

        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.full_name

        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.full_name

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_inspect_volume(self):
        vol = self.create_volume('volume01')
        vol.create()
        info = vol.inspect()
        assert info['Name'] == vol.full_name

    @no_cluster('remove volume by name defect on Swarm Classic')
    def test_remove_volume(self):
        vol = Volume(self.client, 'composetest', 'volume01')
        vol.create()
        vol.remove()
        volumes = self.client.volumes()['Volumes']
        assert len([v for v in volumes if v['Name'] == vol.full_name]) == 0

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_external_volume(self):
        # external=True: the name is used verbatim, no project prefix.
        vol = self.create_volume('composetest_volume_ext', external=True)
        assert vol.external is True
        assert vol.full_name == vol.name
        vol.create()
        info = vol.inspect()
        assert info['Name'] == vol.name

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_external_aliased_volume(self):
        # external=<str>: the alias replaces the declared name entirely.
        alias_name = 'composetest_alias01'
        vol = self.create_volume('volume01', external=alias_name)
        assert vol.external is True
        assert vol.full_name == alias_name
        vol.create()
        info = vol.inspect()
        assert info['Name'] == alias_name

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_exists(self):
        vol = self.create_volume('volume01')
        assert vol.exists() is False
        vol.create()
        assert vol.exists() is True

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_exists_external(self):
        vol = self.create_volume('volume01', external=True)
        assert vol.exists() is False
        vol.create()
        assert vol.exists() is True

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_exists_external_aliased(self):
        vol = self.create_volume('volume01', external='composetest_alias01')
        assert vol.exists() is False
        vol.create()
        assert vol.exists() is True

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_volume_default_labels(self):
        # Created volumes carry the compose project/volume labels.
        vol = self.create_volume('volume01')
        vol.create()
        vol_data = vol.inspect()
        labels = vol_data['Labels']
        assert labels[LABEL_VOLUME] == vol.name
        assert labels[LABEL_PROJECT] == vol.project
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/__init__.py
diff --git a/tests/unit/bundle_test.py b/tests/unit/bundle_test.py
new file mode 100644
index 00000000..84779520
--- /dev/null
+++ b/tests/unit/bundle_test.py
@@ -0,0 +1,222 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+import mock
+import pytest
+
+from compose import bundle
+from compose import service
+from compose.cli.errors import UserError
+from compose.config.config import Config
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
@pytest.fixture
def mock_service():
    """An autospecced Service with a mocked API client and empty options."""
    return mock.create_autospec(
        service.Service,
        client=mock.create_autospec(docker.APIClient),
        options={})


def test_get_image_digest_exists(mock_service):
    """The first RepoDigest of the service image is returned."""
    mock_service.options['image'] = 'abcd'
    mock_service.image.return_value = {'RepoDigests': ['digest1']}
    digest = bundle.get_image_digest(mock_service)
    assert digest == 'digest1'


def test_get_image_digest_image_uses_digest(mock_service):
    """An image ref that already pins a digest is used without inspecting."""
    mock_service.options['image'] = image_id = 'redis@sha256:digest'

    digest = bundle.get_image_digest(mock_service)
    assert digest == image_id
    assert not mock_service.image.called


def test_get_image_digest_no_image(mock_service):
    """A service with no image tag raises a UserError."""
    with pytest.raises(UserError) as exc:
        bundle.get_image_digest(service.Service(name='theservice'))

    assert "doesn't define an image tag" in exc.exconly()


def test_push_image_with_saved_digest(mock_service):
    """When the image already has a RepoDigest, no pull is issued."""
    mock_service.options['build'] = '.'
    mock_service.options['image'] = image_id = 'abcd'
    mock_service.push.return_value = expected = 'sha256:thedigest'
    mock_service.image.return_value = {'RepoDigests': ['digest1']}

    digest = bundle.push_image(mock_service)
    assert digest == image_id + '@' + expected

    mock_service.push.assert_called_once_with()
    assert not mock_service.client.push.called


def test_push_image(mock_service):
    """Without a saved digest, the pushed digest is pulled back locally."""
    mock_service.options['build'] = '.'
    mock_service.options['image'] = image_id = 'abcd'
    mock_service.push.return_value = expected = 'sha256:thedigest'
    mock_service.image.return_value = {'RepoDigests': []}

    digest = bundle.push_image(mock_service)
    assert digest == image_id + '@' + expected

    mock_service.push.assert_called_once_with()
    mock_service.client.pull.assert_called_once_with(digest)


def test_to_bundle():
    """Top-level volumes/networks are dropped with a warning each."""
    image_digests = {'a': 'aaaa', 'b': 'bbbb'}
    services = [
        {'name': 'a', 'build': '.', },
        {'name': 'b', 'build': './b'},
    ]
    config = Config(
        version=V2_0,
        services=services,
        volumes={'special': {}},
        networks={'extra': {}},
        secrets={},
        configs={}
    )

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        output = bundle.to_bundle(config, image_digests)

    assert mock_log.mock_calls == [
        mock.call("Unsupported top level key 'networks' - ignoring"),
        mock.call("Unsupported top level key 'volumes' - ignoring"),
    ]

    assert output == {
        'Version': '0.1',
        'Services': {
            'a': {'Image': 'aaaa', 'Networks': ['default']},
            'b': {'Image': 'bbbb', 'Networks': ['default']},
        }
    }


def test_convert_service_to_bundle():
    """Supported keys map to bundle fields; unsupported ones warn."""
    name = 'theservice'
    image_digest = 'thedigest'
    service_dict = {
        'ports': ['80'],
        'expose': ['1234'],
        'networks': {'extra': {}},
        'command': 'foo',
        'entrypoint': 'entry',
        'environment': {'BAZ': 'ENV'},
        'build': '.',
        'working_dir': '/tmp',
        'user': 'root',
        'labels': {'FOO': 'LABEL'},
        'privileged': True,
    }

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        config = bundle.convert_service_to_bundle(name, service_dict, image_digest)

    mock_log.assert_called_once_with(
        "Unsupported key 'privileged' in services.theservice - ignoring")

    assert config == {
        'Image': image_digest,
        'Ports': [
            {'Protocol': 'tcp', 'Port': 80},
            {'Protocol': 'tcp', 'Port': 1234},
        ],
        'Networks': ['extra'],
        # entrypoint and command are merged into a single Command list.
        'Command': ['entry', 'foo'],
        'Env': ['BAZ=ENV'],
        'WorkingDir': '/tmp',
        'User': 'root',
        'Labels': {'FOO': 'LABEL'},
    }


def test_set_command_and_args_none():
    """Empty entrypoint and command leave the config untouched."""
    config = {}
    bundle.set_command_and_args(config, [], [])
    assert config == {}


def test_set_command_and_args_from_command():
    """A command alone becomes Args."""
    config = {}
    bundle.set_command_and_args(config, [], "echo ok")
    assert config == {'Args': ['echo', 'ok']}


def test_set_command_and_args_from_entrypoint():
    """An entrypoint alone becomes Command."""
    config = {}
    bundle.set_command_and_args(config, "echo entry", [])
    assert config == {'Command': ['echo', 'entry']}


def test_set_command_and_args_from_both():
    """Entrypoint and command are concatenated into Command."""
    config = {}
    bundle.set_command_and_args(config, "echo entry", ["extra", "arg"])
    assert config == {'Command': ['echo', 'entry', "extra", "arg"]}


def test_make_service_networks_default():
    """A service with no networks lands on ['default'] with no warning."""
    name = 'theservice'
    service_dict = {}

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        networks = bundle.make_service_networks(name, service_dict)

    assert not mock_log.called
    assert networks == ['default']


def test_make_service_networks():
    """Network names are kept; per-network options like aliases warn."""
    name = 'theservice'
    service_dict = {
        'networks': {
            'foo': {
                'aliases': ['one', 'two'],
            },
            'bar': {}
        },
    }

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        networks = bundle.make_service_networks(name, service_dict)

    mock_log.assert_called_once_with(
        "Unsupported key 'aliases' in services.theservice.networks.foo - ignoring")
    assert sorted(networks) == sorted(service_dict['networks'])


def test_make_port_specs():
    """expose and ports merge; host-side mappings and ranges are flattened."""
    service_dict = {
        'expose': ['80', '500/udp'],
        'ports': [
            '400:80',
            '222',
            '127.0.0.1:8001:8001',
            '127.0.0.1:5000-5001:3000-3001'],
    }
    port_specs = bundle.make_port_specs(service_dict)
    assert port_specs == [
        {'Protocol': 'tcp', 'Port': 80},
        {'Protocol': 'tcp', 'Port': 222},
        {'Protocol': 'tcp', 'Port': 8001},
        {'Protocol': 'tcp', 'Port': 3000},
        {'Protocol': 'tcp', 'Port': 3001},
        {'Protocol': 'udp', 'Port': 500},
    ]


def test_make_port_spec_with_protocol():
    port_spec = bundle.make_port_spec("5000/udp")
    assert port_spec == {'Protocol': 'udp', 'Port': 5000}


def test_make_port_spec_default_protocol():
    # tcp is the default when no protocol suffix is given.
    port_spec = bundle.make_port_spec("50000")
    assert port_spec == {'Protocol': 'tcp', 'Port': 50000}
diff --git a/tests/unit/cli/__init__.py b/tests/unit/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/cli/__init__.py
diff --git a/tests/unit/cli/command_test.py b/tests/unit/cli/command_test.py
new file mode 100644
index 00000000..3a9844c4
--- /dev/null
+++ b/tests/unit/cli/command_test.py
@@ -0,0 +1,76 @@
+# ~*~ encoding: utf-8 ~*~
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+
+import pytest
+import six
+
+from compose.cli.command import get_config_path_from_options
+from compose.config.environment import Environment
+from compose.const import IS_WINDOWS_PLATFORM
+from tests import mock
+
+
class TestGetConfigPathFromOptions(object):
    """Tests for resolving compose file paths from CLI options and COMPOSE_FILE."""

    def test_path_from_options(self):
        # --file takes precedence and is returned verbatim.
        paths = ['one.yml', 'two.yml']
        opts = {'--file': paths}
        environment = Environment.from_env_file('.')
        assert get_config_path_from_options('.', opts, environment) == paths

    def test_single_path_from_env(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options('.', {}, environment) == ['one.yml']

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix separator')
    def test_multiple_path_from_env(self):
        # ':' is the default path separator on POSIX.
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml:two.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options(
                '.', {}, environment
            ) == ['one.yml', 'two.yml']

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator')
    def test_multiple_path_from_env_windows(self):
        # ';' is the default path separator on Windows.
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml;two.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options(
                '.', {}, environment
            ) == ['one.yml', 'two.yml']

    def test_multiple_path_from_env_custom_separator(self):
        # COMPOSE_PATH_SEPARATOR overrides the platform default, which
        # allows paths containing ':' or ';'.
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
            os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options(
                '.', {}, environment
            ) == ['c:\\one.yml', '.\\semi;colon.yml']

    def test_no_path(self):
        environment = Environment.from_env_file('.')
        assert not get_config_path_from_options('.', {}, environment)

    def test_unicode_path_from_options(self):
        # UTF-8 byte paths from options are decoded to text.
        paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
        opts = {'--file': paths}
        environment = Environment.from_env_file('.')
        assert get_config_path_from_options(
            '.', opts, environment
        ) == ['就吃饭/docker-compose.yml']

    @pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
    def test_unicode_path_from_env(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options(
                '.', {}, environment
            ) == ['就吃饭/docker-compose.yml']
diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py
new file mode 100644
index 00000000..482ad985
--- /dev/null
+++ b/tests/unit/cli/docker_client_test.py
@@ -0,0 +1,187 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import platform
+import ssl
+
+import docker
+import pytest
+
+import compose
+from compose.cli import errors
+from compose.cli.docker_client import docker_client
+from compose.cli.docker_client import get_tls_version
+from compose.cli.docker_client import tls_config_from_options
+from tests import mock
+from tests import unittest
+
+
class DockerClientTestCase(unittest.TestCase):
    """Unit tests for docker_client() construction and error reporting."""

    def test_docker_client_no_home(self):
        """Client construction must not require $HOME to be set."""
        with mock.patch.dict(os.environ):
            # pop instead of del: the original raised KeyError on CI
            # environments where HOME is not set at all.
            os.environ.pop('HOME', None)
            docker_client(os.environ)

    @mock.patch.dict(os.environ)
    def test_docker_client_with_custom_timeout(self):
        """COMPOSE_HTTP_TIMEOUT overrides the client's HTTP timeout."""
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ)
        assert client.timeout == 123

    @mock.patch.dict(os.environ)
    def test_custom_timeout_error(self):
        """Timeout errors mention the configured COMPOSE_HTTP_TIMEOUT value."""
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ)

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.RequestsConnectionError(
                        errors.ReadTimeoutError(None, None, None))

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.ReadTimeout()

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

    def test_user_agent(self):
        """The User-Agent header identifies compose, docker-py and the platform."""
        client = docker_client(os.environ)
        expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format(
            compose.__version__,
            docker.__version__,
            platform.system(),
            platform.release()
        )
        # Plain assert for consistency with the rest of this module
        # (the original mixed in unittest's assertEqual here).
        assert client.headers['User-Agent'] == expected
+
+
class TLSConfigTestCase(unittest.TestCase):
    """Tests for tls_config_from_options() mapping CLI flags to TLSConfig."""

    # Fixture certificate/key paths checked into the repo.
    ca_cert = 'tests/fixtures/tls/ca.pem'
    client_cert = 'tests/fixtures/tls/cert.pem'
    key = 'tests/fixtures/tls/key.key'

    def test_simple_tls(self):
        # Bare --tls with no certs yields the literal True sentinel.
        options = {'--tls': True}
        result = tls_config_from_options(options)
        assert result is True

    def test_tls_ca_cert(self):
        options = {
            '--tlscacert': self.ca_cert, '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_ca_cert_explicit(self):
        # Adding --tls on top of verify options changes nothing.
        options = {
            '--tlscacert': self.ca_cert, '--tls': True,
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_cert(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])

    def test_tls_client_cert_explicit(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key,
            '--tls': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])

    def test_tls_client_and_ca(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key,
            '--tlsverify': True, '--tlscacert': self.ca_cert
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_and_ca_explicit(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key,
            '--tlsverify': True, '--tlscacert': self.ca_cert,
            '--tls': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_missing_key(self):
        # Cert and key must be supplied together, in either direction.
        options = {'--tlscert': self.client_cert}
        with pytest.raises(docker.errors.TLSParameterError):
            tls_config_from_options(options)

        options = {'--tlskey': self.key}
        with pytest.raises(docker.errors.TLSParameterError):
            tls_config_from_options(options)

    def test_assert_hostname_explicit_skip(self):
        options = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True}
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.assert_hostname is False

    def test_tls_client_and_ca_quoted_paths(self):
        # Surrounding quotes (as passed by some shells) are stripped.
        options = {
            '--tlscacert': '"{0}"'.format(self.ca_cert),
            '--tlscert': '"{0}"'.format(self.client_cert),
            '--tlskey': '"{0}"'.format(self.key),
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is True

    def test_tls_simple_with_tls_version(self):
        # COMPOSE_TLS_VERSION selects the ssl protocol constant.
        tls_version = 'TLSv1'
        options = {'--tls': True}
        environment = {'COMPOSE_TLS_VERSION': tls_version}
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ssl_version == ssl.PROTOCOL_TLSv1


class TestGetTlsVersion(object):
    """Tests for get_tls_version() parsing of COMPOSE_TLS_VERSION."""

    def test_get_tls_version_default(self):
        environment = {}
        assert get_tls_version(environment) is None

    @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
    def test_get_tls_version_upgrade(self):
        environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
        assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2

    def test_get_tls_version_unavailable(self):
        # Unknown versions warn and fall back to None.
        environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
        with mock.patch('compose.cli.docker_client.log') as mock_log:
            tls_version = get_tls_version(environment)
        mock_log.warn.assert_called_once_with(mock.ANY)
        assert tls_version is None
diff --git a/tests/unit/cli/errors_test.py b/tests/unit/cli/errors_test.py
new file mode 100644
index 00000000..68326d1c
--- /dev/null
+++ b/tests/unit/cli/errors_test.py
@@ -0,0 +1,88 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+from docker.errors import APIError
+from requests.exceptions import ConnectionError
+
+from compose.cli import errors
+from compose.cli.errors import handle_connection_errors
+from compose.const import IS_WINDOWS_PLATFORM
+from tests import mock
+
+
# NOTE(review): pytest.yield_fixture is deprecated in favour of
# pytest.fixture in newer pytest releases — worth migrating eventually.
@pytest.yield_fixture
def mock_logging():
    """Patch the errors module logger for the duration of a test."""
    with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
        yield mock_log


def patch_find_executable(side_effect):
    """Patch find_executable to return the given values in sequence."""
    return mock.patch(
        'compose.cli.errors.find_executable',
        autospec=True,
        side_effect=side_effect)


class TestHandleConnectionErrors(object):
    """Tests for the handle_connection_errors context manager's messages."""

    def test_generic_connection_error(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with patch_find_executable(['/bin/docker', None]):
                with handle_connection_errors(mock.Mock()):
                    raise ConnectionError()

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Couldn't connect to Docker daemon" in args[0]

    def test_api_error_version_mismatch(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, b"client is newer than server")

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Docker Engine of version 1.10.0 or greater" in args[0]

    def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
        # Same as above, but the explanation arrives as text, not bytes.
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, u"client is newer than server")

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Docker Engine of version 1.10.0 or greater" in args[0]

    def test_api_error_version_other(self, mock_logging):
        # Non-version APIErrors are logged verbatim (decoded from bytes).
        msg = b"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)

        mock_logging.error.assert_called_once_with(msg.decode('utf-8'))

    def test_api_error_version_other_unicode_explanation(self, mock_logging):
        msg = u"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)

        mock_logging.error.assert_called_once_with(msg)

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_no_data(self, mock_logging):
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "The current Compose file version is not compatible with your engine version." in args[0]

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_misc(self, mock_logging):
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
diff --git a/tests/unit/cli/formatter_test.py b/tests/unit/cli/formatter_test.py
new file mode 100644
index 00000000..4aa025e6
--- /dev/null
+++ b/tests/unit/cli/formatter_test.py
@@ -0,0 +1,53 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+from compose.cli import colors
+from compose.cli.formatter import ConsoleWarningFormatter
+from tests import unittest
+
+
MESSAGE = 'this is the message'


def make_log_record(level, message=None):
    """Build a bare LogRecord at *level* carrying *message* (default MESSAGE)."""
    text = message or MESSAGE
    return logging.LogRecord('name', level, 'pathame', 0, text, (), None)
+
+
class ConsoleWarningFormatterTestCase(unittest.TestCase):
    """Tests for ConsoleWarningFormatter's per-level coloured prefixes."""

    def setUp(self):
        self.formatter = ConsoleWarningFormatter()

    def test_format_warn(self):
        output = self.formatter.format(make_log_record(logging.WARN))
        expected = colors.yellow('WARNING') + ': '
        assert output == expected + MESSAGE

    def test_format_error(self):
        output = self.formatter.format(make_log_record(logging.ERROR))
        expected = colors.red('ERROR') + ': '
        assert output == expected + MESSAGE

    def test_format_info(self):
        # INFO gets no prefix at all.
        output = self.formatter.format(make_log_record(logging.INFO))
        assert output == MESSAGE

    def test_format_unicode_info(self):
        """UTF-8 byte messages are decoded transparently."""
        message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
        output = self.formatter.format(make_log_record(logging.INFO, message))
        # (Removed a stray debug print(output) left in the original.)
        assert output == message.decode('utf-8')

    def test_format_unicode_warn(self):
        message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
        output = self.formatter.format(make_log_record(logging.WARN, message))
        expected = colors.yellow('WARNING') + ': '
        assert output == '{0}{1}'.format(expected, message.decode('utf-8'))

    def test_format_unicode_error(self):
        message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
        output = self.formatter.format(make_log_record(logging.ERROR, message))
        expected = colors.red('ERROR') + ': '
        assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py
new file mode 100644
index 00000000..d0c4b56b
--- /dev/null
+++ b/tests/unit/cli/log_printer_test.py
@@ -0,0 +1,201 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import itertools
+
+import pytest
+import requests
+import six
+from docker.errors import APIError
+from six.moves.queue import Queue
+
+from compose.cli.log_printer import build_log_generator
+from compose.cli.log_printer import build_log_presenters
+from compose.cli.log_printer import build_no_log_generator
+from compose.cli.log_printer import consume_queue
+from compose.cli.log_printer import QueueItem
+from compose.cli.log_printer import wait_on_exit
+from compose.cli.log_printer import watch_events
+from compose.container import Container
+from tests import mock
+
+
@pytest.fixture
def output_stream():
    """An in-memory text stream whose flush() calls can be asserted on."""
    output = six.StringIO()
    output.flush = mock.Mock()
    return output


@pytest.fixture
def mock_container():
    """A Container mock presenting itself as service container web_1."""
    return mock.Mock(spec=Container, name_without_project='web_1')
+
+
class TestLogPresenter(object):
    """Tests for the per-container log line presenters."""

    def test_monochrome(self, mock_container):
        presenters = build_log_presenters(['foo', 'bar'], True)
        presenter = next(presenters)
        actual = presenter.present(mock_container, "this line")
        assert actual == "web_1 | this line"

    def test_polychrome(self, mock_container):
        presenters = build_log_presenters(['foo', 'bar'], False)
        presenter = next(presenters)
        actual = presenter.present(mock_container, "this line")
        # An ANSI escape sequence marks the coloured prefix.
        assert '\033[' in actual


def test_wait_on_exit():
    """wait_on_exit reports the container's exit status."""
    exit_status = 3
    mock_container = mock.Mock(
        spec=Container,
        name='cname',
        wait=mock.Mock(return_value=exit_status))

    expected = '{} exited with code {}\n'.format(mock_container.name, exit_status)
    assert expected == wait_on_exit(mock_container)


def test_wait_on_exit_raises():
    """API errors during wait are turned into a readable message."""
    status_code = 500

    def mock_wait():
        resp = requests.Response()
        resp.status_code = status_code
        raise APIError('Bad server', resp)

    mock_container = mock.Mock(
        spec=Container,
        name='cname',
        wait=mock_wait
    )

    expected = 'Unexpected API error for {} (HTTP code {})\n'.format(
        mock_container.name, status_code,
    )
    assert expected in wait_on_exit(mock_container)


def test_build_no_log_generator(mock_container):
    """Containers without API logs yield a single warning line."""
    mock_container.has_api_logs = False
    mock_container.log_driver = 'none'
    # Trailing comma unpack: the generator must yield exactly one item.
    output, = build_no_log_generator(mock_container, None)
    assert "WARNING: no logs are available with the 'none' log driver\n" in output
    assert "exited with code" not in output
+
+
class TestBuildLogGenerator(object):
    """Tests for build_log_generator's handling of log sources."""

    def test_no_log_stream(self, mock_container):
        """Without an attached stream, logs are fetched via the API."""
        mock_container.log_stream = None
        mock_container.logs.return_value = iter([b"hello\nworld"])
        log_args = {'follow': True}

        generator = build_log_generator(mock_container, log_args)
        assert next(generator) == "hello\n"
        assert next(generator) == "world"
        mock_container.logs.assert_called_once_with(
            stdout=True,
            stderr=True,
            stream=True,
            **log_args)

    def test_with_log_stream(self, mock_container):
        """An existing attached stream is consumed directly."""
        mock_container.log_stream = iter([b"hello\nworld"])
        log_args = {'follow': True}

        generator = build_log_generator(mock_container, log_args)
        assert next(generator) == "hello\n"
        assert next(generator) == "world"

    def test_unicode(self, mock_container):
        """Multi-byte UTF-8 log data is decoded back to text.

        Fixed: the original requested the unrelated ``output_stream``
        fixture and then referenced the module-level ``mock_container``
        fixture *function* directly instead of an injected mock.
        """
        glyph = u'\u2022\n'
        mock_container.log_stream = iter([glyph.encode('utf-8')])

        generator = build_log_generator(mock_container, {})
        assert next(generator) == glyph
+
+
@pytest.fixture
def thread_map():
    """A container-id -> thread map seeded with one running entry."""
    return {'cid': mock.Mock()}


@pytest.fixture
def mock_presenters():
    """An endless cycle of presenter mocks, as watch_events expects."""
    return itertools.cycle([mock.Mock()])


class TestWatchEvents(object):
    """Tests for watch_events' bookkeeping of log threads per container."""

    def test_stop_event(self, thread_map, mock_presenters):
        # A stop event removes the container's thread from the map.
        event_stream = [{'action': 'stop', 'id': 'cid'}]
        watch_events(thread_map, event_stream, mock_presenters, ())
        assert not thread_map

    def test_start_event(self, thread_map, mock_presenters):
        # A start event spawns a new log thread for the container.
        container_id = 'abcd'
        event = {'action': 'start', 'id': container_id, 'container': mock.Mock()}
        event_stream = [event]
        thread_args = 'foo', 'bar'

        with mock.patch(
            'compose.cli.log_printer.build_thread',
            autospec=True
        ) as mock_build_thread:
            watch_events(thread_map, event_stream, mock_presenters, thread_args)
            mock_build_thread.assert_called_once_with(
                event['container'],
                next(mock_presenters),
                *thread_args)
        assert container_id in thread_map

    def test_other_event(self, thread_map, mock_presenters):
        # Events other than start/stop are ignored.
        container_id = 'abcd'
        event_stream = [{'action': 'create', 'id': container_id}]
        watch_events(thread_map, event_stream, mock_presenters, ())
        assert container_id not in thread_map
+
+
class TestConsumeQueue(object):
    """Tests for consume_queue()'s handling of items, stops and errors."""

    def test_item_is_an_exception(self):
        """An exception item is re-raised after earlier items are yielded."""

        class Problem(Exception):
            pass

        queue = Queue()
        error = Problem('oops')
        for item in QueueItem.new('a'), QueueItem.new('b'), QueueItem.exception(error):
            queue.put(item)

        generator = consume_queue(queue, False)
        assert next(generator) == 'a'
        assert next(generator) == 'b'
        with pytest.raises(Problem):
            next(generator)

    def test_item_is_stop_without_cascade_stop(self):
        """A stop item is skipped when cascade_stop is off."""
        queue = Queue()
        for item in QueueItem.stop(), QueueItem.new('a'), QueueItem.new('b'):
            queue.put(item)

        generator = consume_queue(queue, False)
        assert next(generator) == 'a'
        assert next(generator) == 'b'

    def test_item_is_stop_with_cascade_stop(self):
        """Return the name of the container that caused the cascade_stop"""
        queue = Queue()
        for item in QueueItem.stop('foobar-1'), QueueItem.new('a'), QueueItem.new('b'):
            queue.put(item)

        generator = consume_queue(queue, True)
        # Fixed: the original compared with `is`, which only passed by
        # accident of CPython string interning.
        assert next(generator) == 'foobar-1'

    def test_item_is_none_when_timeout_is_hit(self):
        queue = Queue()
        generator = consume_queue(queue, False)
        assert next(generator) is None
diff --git a/tests/unit/cli/main_test.py b/tests/unit/cli/main_test.py
new file mode 100644
index 00000000..dc527880
--- /dev/null
+++ b/tests/unit/cli/main_test.py
@@ -0,0 +1,104 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+import pytest
+
+from compose import container
+from compose.cli.errors import UserError
+from compose.cli.formatter import ConsoleWarningFormatter
+from compose.cli.main import convergence_strategy_from_opts
+from compose.cli.main import filter_containers_to_service_names
+from compose.cli.main import setup_console_handler
+from compose.service import ConvergenceStrategy
+from tests import mock
+
+
def mock_container(service, number):
    """Build an autospec'd Container stub for the given service and number."""
    short_name = '{0}_{1}'.format(service, number)
    return mock.create_autospec(
        container.Container,
        service=service,
        number=number,
        name_without_project=short_name,
    )
+
+
@pytest.fixture
def logging_handler():
    """Return a StreamHandler whose stream is a mock that reports being a TTY."""
    fake_stream = mock.Mock()
    fake_stream.isatty.return_value = True
    return logging.StreamHandler(stream=fake_stream)
+
+
class TestCLIMainTestCase(object):
    """Tests for container-filtering helpers in compose.cli.main."""

    def test_filter_containers_to_service_names(self):
        """Only containers belonging to the requested services survive, in order."""
        containers = [
            mock_container('web', 1),
            mock_container('web', 2),
            mock_container('db', 1),
            mock_container('other', 1),
            mock_container('another', 1),
        ]
        wanted = ['web', 'db']
        filtered = filter_containers_to_service_names(containers, wanted)
        assert filtered == containers[:3]

    def test_filter_containers_to_service_names_all(self):
        """An empty service-name list means no filtering at all."""
        containers = [
            mock_container('web', 1),
            mock_container('db', 1),
            mock_container('other', 1),
        ]
        filtered = filter_containers_to_service_names(containers, [])
        assert filtered == containers
+
+
class TestSetupConsoleHandlerTestCase(object):
    """Tests for compose.cli.main.setup_console_handler formatter selection."""

    def test_with_tty_verbose(self, logging_handler):
        """Verbose TTY output includes logger name and function name."""
        setup_console_handler(logging_handler, True)
        formatter = logging_handler.formatter
        assert type(formatter) == ConsoleWarningFormatter
        for token in ('%(name)s', '%(funcName)s'):
            assert token in formatter._fmt

    def test_with_tty_not_verbose(self, logging_handler):
        """Non-verbose TTY output omits logger name and function name."""
        setup_console_handler(logging_handler, False)
        formatter = logging_handler.formatter
        assert type(formatter) == ConsoleWarningFormatter
        for token in ('%(name)s', '%(funcName)s'):
            assert token not in formatter._fmt

    def test_with_not_a_tty(self, logging_handler):
        """Without a TTY, a plain logging.Formatter is installed."""
        logging_handler.stream.isatty.return_value = False
        setup_console_handler(logging_handler, False)
        assert type(logging_handler.formatter) == logging.Formatter
+
+
class TestConvergeStrategyFromOptsTestCase(object):
    """Tests for compose.cli.main.convergence_strategy_from_opts."""

    def test_invalid_opts(self):
        """Forcing and forbidding recreation at the same time is a user error."""
        options = {'--force-recreate': True, '--no-recreate': True}
        with pytest.raises(UserError):
            convergence_strategy_from_opts(options)

    def test_always(self):
        opts = {'--force-recreate': True, '--no-recreate': False}
        assert convergence_strategy_from_opts(opts) == ConvergenceStrategy.always

    def test_never(self):
        opts = {'--force-recreate': False, '--no-recreate': True}
        assert convergence_strategy_from_opts(opts) == ConvergenceStrategy.never

    def test_changed(self):
        # Neither flag set: recreate only containers whose config changed.
        opts = {'--force-recreate': False, '--no-recreate': False}
        assert convergence_strategy_from_opts(opts) == ConvergenceStrategy.changed
diff --git a/tests/unit/cli/utils_test.py b/tests/unit/cli/utils_test.py
new file mode 100644
index 00000000..066fb359
--- /dev/null
+++ b/tests/unit/cli/utils_test.py
@@ -0,0 +1,23 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import unittest
+
+from compose.cli.utils import unquote_path
+
+
class UnquotePathTest(unittest.TestCase):
    """Tests for compose.cli.utils.unquote_path."""

    def test_no_quotes(self):
        self.assertEqual(unquote_path('hello'), 'hello')

    def test_simple_quotes(self):
        self.assertEqual(unquote_path('"hello"'), 'hello')

    def test_uneven_quotes(self):
        # A quote on only one side is left untouched.
        self.assertEqual(unquote_path('"hello'), '"hello')
        self.assertEqual(unquote_path('hello"'), 'hello"')

    def test_nested_quotes(self):
        # Only the single outermost pair of quotes is stripped.
        self.assertEqual(unquote_path('""hello""'), '"hello"')
        self.assertEqual(unquote_path('"hel"lo"'), 'hel"lo')
        self.assertEqual(unquote_path('"hello""'), 'hello"')
diff --git a/tests/unit/cli/verbose_proxy_test.py b/tests/unit/cli/verbose_proxy_test.py
new file mode 100644
index 00000000..f77568dc
--- /dev/null
+++ b/tests/unit/cli/verbose_proxy_test.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import six
+
+from compose.cli import verbose_proxy
+from tests import unittest
+
+
class VerboseProxyTestCase(unittest.TestCase):
    """Tests for the formatting helpers in compose.cli.verbose_proxy."""

    def test_format_call(self):
        # Python 2 reprs text strings with a u'' prefix; Python 3 does not.
        prefix = '' if six.PY3 else 'u'
        expected = "(%(p)s'arg1', True, key=%(p)s'value')" % dict(p=prefix)
        actual = verbose_proxy.format_call(("arg1", True), {'key': 'value'})
        self.assertEqual(expected, actual)

    def test_format_return_sequence(self):
        # Long sequences are summarized rather than dumped in full.
        actual = verbose_proxy.format_return(list(range(10)), 2)
        self.assertEqual("(list with 10 items)", actual)

    def test_format_return(self):
        value = {'Id': 'ok'}
        self.assertEqual(repr(value), verbose_proxy.format_return(value, 2))

    def test_format_return_no_result(self):
        self.assertEqual(None, verbose_proxy.format_return(None, 2))
diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py
new file mode 100644
index 00000000..f9ce240a
--- /dev/null
+++ b/tests/unit/cli_test.py
@@ -0,0 +1,214 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import shutil
+import tempfile
+from io import StringIO
+
+import docker
+import py
+import pytest
+
+from .. import mock
+from .. import unittest
+from ..helpers import build_config
+from compose.cli.command import get_project
+from compose.cli.command import get_project_name
+from compose.cli.docopt_command import NoSuchCommand
+from compose.cli.errors import UserError
+from compose.cli.main import TopLevelCommand
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.project import Project
+
+
class CLITestCase(unittest.TestCase):
    """Tests for project-name resolution, help output, and the `run` command.

    The `run` tests drive TopLevelCommand with hand-built docopt-style option
    dictionaries, so the exact keys mirror the CLI's documented options.
    """

    def test_default_project_name(self):
        # With no explicit name, the project name comes from the cwd basename.
        test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
        with test_dir.as_cwd():
            project_name = get_project_name('.')
            self.assertEqual('simplecomposefile', project_name)

    def test_project_name_with_explicit_base_dir(self):
        base_dir = 'tests/fixtures/simple-composefile'
        project_name = get_project_name(base_dir)
        self.assertEqual('simplecomposefile', project_name)

    def test_project_name_with_explicit_uppercase_base_dir(self):
        # Directory names are normalized to lowercase for the project name.
        base_dir = 'tests/fixtures/UpperCaseDir'
        project_name = get_project_name(base_dir)
        self.assertEqual('uppercasedir', project_name)

    def test_project_name_with_explicit_project_name(self):
        # Explicit names are normalized too (dashes are stripped).
        name = 'explicit-project-name'
        project_name = get_project_name(None, project_name=name)
        self.assertEqual('explicitprojectname', project_name)

    @mock.patch.dict(os.environ)
    def test_project_name_from_environment_new_var(self):
        name = 'namefromenv'
        os.environ['COMPOSE_PROJECT_NAME'] = name
        project_name = get_project_name(None)
        self.assertEqual(project_name, name)

    def test_project_name_with_empty_environment_var(self):
        # An empty COMPOSE_PROJECT_NAME falls back to the directory name.
        base_dir = 'tests/fixtures/simple-composefile'
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_PROJECT_NAME'] = ''
            project_name = get_project_name(base_dir)
            self.assertEqual('simplecomposefile', project_name)

    @mock.patch.dict(os.environ)
    def test_project_name_with_environment_file(self):
        base_dir = tempfile.mkdtemp()
        try:
            name = 'namefromenvfile'
            with open(os.path.join(base_dir, '.env'), 'w') as f:
                f.write('COMPOSE_PROJECT_NAME={}'.format(name))
            project_name = get_project_name(base_dir)
            assert project_name == name

            # Environment has priority over .env file
            os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv'
            assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME']
        finally:
            shutil.rmtree(base_dir)

    def test_get_project(self):
        base_dir = 'tests/fixtures/longer-filename-composefile'
        project = get_project(base_dir)
        self.assertEqual(project.name, 'longerfilenamecomposefile')
        self.assertTrue(project.client)
        self.assertTrue(project.services)

    def test_command_help(self):
        # `help` prints the named subcommand's usage text to stdout.
        with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
            TopLevelCommand.help({'COMMAND': 'up'})

        assert "Usage: up" in fake_stdout.getvalue()

    def test_command_help_nonexistent(self):
        with pytest.raises(NoSuchCommand):
            TopLevelCommand.help({'COMMAND': 'nonexistent'})

    @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
    @mock.patch('compose.cli.main.RunOperation', autospec=True)
    @mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
    def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
        mock_client = mock.create_autospec(docker.APIClient)
        project = Project.from_config(
            name='composetest',
            client=mock_client,
            config_data=build_config({
                'service': {'image': 'busybox'}
            }),
        )
        command = TopLevelCommand(project)

        with pytest.raises(SystemExit):
            command.run({
                'SERVICE': 'service',
                'COMMAND': None,
                '-e': [],
                '--user': None,
                '--no-deps': None,
                '-d': False,
                '-T': None,
                '--entrypoint': None,
                '--service-ports': None,
                '--publish': [],
                '--volume': [],
                '--rm': None,
                '--name': None,
                '--workdir': None,
            })

        # An interactive run must construct RunOperation with logs=False so
        # the pseudo-terminal owns the container's output stream.
        _, _, call_kwargs = mock_run_operation.mock_calls[0]
        assert call_kwargs['logs'] is False

    def test_run_service_with_restart_always(self):
        mock_client = mock.create_autospec(docker.APIClient)

        project = Project.from_config(
            name='composetest',
            client=mock_client,
            config_data=build_config({
                'service': {
                    'image': 'busybox',
                    'restart': 'always',
                }
            }),
        )

        # Without --rm, the service's restart policy is forwarded as-is.
        command = TopLevelCommand(project)
        command.run({
            'SERVICE': 'service',
            'COMMAND': None,
            '-e': [],
            '--user': None,
            '--no-deps': None,
            '-d': True,
            '-T': None,
            '--entrypoint': None,
            '--service-ports': None,
            '--publish': [],
            '--volume': [],
            '--rm': None,
            '--name': None,
            '--workdir': None,
        })

        self.assertEqual(
            mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
            'always'
        )

        # With --rm, the restart policy must be dropped: a restarting
        # container cannot also be auto-removed.
        command = TopLevelCommand(project)
        command.run({
            'SERVICE': 'service',
            'COMMAND': None,
            '-e': [],
            '--user': None,
            '--no-deps': None,
            '-d': True,
            '-T': None,
            '--entrypoint': None,
            '--service-ports': None,
            '--publish': [],
            '--volume': [],
            '--rm': True,
            '--name': None,
            '--workdir': None,
        })

        self.assertFalse(
            mock_client.create_host_config.call_args[1].get('restart_policy')
        )

    def test_command_manual_and_service_ports_together(self):
        # --service-ports and explicit --publish are mutually exclusive.
        project = Project.from_config(
            name='composetest',
            client=None,
            config_data=build_config({
                'service': {'image': 'busybox'},
            }),
        )
        command = TopLevelCommand(project)

        with self.assertRaises(UserError):
            command.run({
                'SERVICE': 'service',
                'COMMAND': None,
                '-e': [],
                '--user': None,
                '--no-deps': None,
                '-d': True,
                '-T': None,
                '--entrypoint': None,
                '--service-ports': True,
                '--publish': ['80:80'],
                '--rm': None,
                '--name': None,
            })
diff --git a/tests/unit/config/__init__.py b/tests/unit/config/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/config/__init__.py
diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py
new file mode 100644
index 00000000..8e3d4e2e
--- /dev/null
+++ b/tests/unit/config/config_test.py
@@ -0,0 +1,4482 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import shutil
+import tempfile
+from operator import itemgetter
+
+import py
+import pytest
+import yaml
+
+from ...helpers import build_config_details
+from compose.config import config
+from compose.config import types
+from compose.config.config import resolve_build_args
+from compose.config.config import resolve_environment
+from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
+from compose.config.errors import VERSION_EXPLANATION
+from compose.config.serialize import denormalize_service_dict
+from compose.config.serialize import serialize_config
+from compose.config.serialize import serialize_ns_time_value
+from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.utils import nanoseconds_from_time_seconds
+from tests import mock
+from tests import unittest
+
+DEFAULT_VERSION = V2_0
+
+
def make_service_dict(name, service_dict, working_dir, filename=None):
    """Test helper: resolve `extends` for one service and return its dict."""
    service_config = config.ServiceConfig(
        working_dir=working_dir,
        filename=filename,
        name=name,
        config=service_dict,
    )
    resolver = config.ServiceExtendsResolver(
        service_config,
        config.ConfigFile(filename=filename, config={}),
        environment=Environment.from_env_file(working_dir),
    )
    return config.process_service(resolver.run())
+
+
def service_sort(services):
    """Return the service dicts ordered by their 'name' key."""
    return sorted(services, key=lambda service: service['name'])
+
+
def secret_sort(secrets):
    """Return the secret dicts ordered by their 'source' key."""
    return sorted(secrets, key=lambda secret: secret['source'])
+
+
+class ConfigTest(unittest.TestCase):
+
+ def test_load(self):
+ service_dicts = config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox'},
+ 'bar': {'image': 'busybox', 'environment': ['FOO=1']},
+ },
+ 'tests/fixtures/extends',
+ 'common.yml'
+ )
+ ).services
+
+ self.assertEqual(
+ service_sort(service_dicts),
+ service_sort([
+ {
+ 'name': 'bar',
+ 'image': 'busybox',
+ 'environment': {'FOO': '1'},
+ },
+ {
+ 'name': 'foo',
+ 'image': 'busybox',
+ }
+ ])
+ )
+
+ def test_load_v2(self):
+ config_data = config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'foo': {'image': 'busybox'},
+ 'bar': {'image': 'busybox', 'environment': ['FOO=1']},
+ },
+ 'volumes': {
+ 'hello': {
+ 'driver': 'default',
+ 'driver_opts': {'beep': 'boop'}
+ }
+ },
+ 'networks': {
+ 'default': {
+ 'driver': 'bridge',
+ 'driver_opts': {'beep': 'boop'}
+ },
+ 'with_ipam': {
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.28.0.0/16'}
+ ]
+ }
+ },
+ 'internal': {
+ 'driver': 'bridge',
+ 'internal': True
+ }
+ }
+ }, 'working_dir', 'filename.yml')
+ )
+ service_dicts = config_data.services
+ volume_dict = config_data.volumes
+ networks_dict = config_data.networks
+ self.assertEqual(
+ service_sort(service_dicts),
+ service_sort([
+ {
+ 'name': 'bar',
+ 'image': 'busybox',
+ 'environment': {'FOO': '1'},
+ },
+ {
+ 'name': 'foo',
+ 'image': 'busybox',
+ }
+ ])
+ )
+ self.assertEqual(volume_dict, {
+ 'hello': {
+ 'driver': 'default',
+ 'driver_opts': {'beep': 'boop'}
+ }
+ })
+ self.assertEqual(networks_dict, {
+ 'default': {
+ 'driver': 'bridge',
+ 'driver_opts': {'beep': 'boop'}
+ },
+ 'with_ipam': {
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.28.0.0/16'}
+ ]
+ }
+ },
+ 'internal': {
+ 'driver': 'bridge',
+ 'internal': True
+ }
+ })
+
+ def test_valid_versions(self):
+ for version in ['2', '2.0']:
+ cfg = config.load(build_config_details({'version': version}))
+ assert cfg.version == V2_0
+
+ cfg = config.load(build_config_details({'version': '2.1'}))
+ assert cfg.version == V2_1
+
+ cfg = config.load(build_config_details({'version': '2.2'}))
+ assert cfg.version == V2_2
+
+ cfg = config.load(build_config_details({'version': '2.3'}))
+ assert cfg.version == V2_3
+
+ for version in ['3', '3.0']:
+ cfg = config.load(build_config_details({'version': version}))
+ assert cfg.version == V3_0
+
+ cfg = config.load(build_config_details({'version': '3.1'}))
+ assert cfg.version == V3_1
+
+ def test_v1_file_version(self):
+ cfg = config.load(build_config_details({'web': {'image': 'busybox'}}))
+ assert cfg.version == V1
+ assert list(s['name'] for s in cfg.services) == ['web']
+
+ cfg = config.load(build_config_details({'version': {'image': 'busybox'}}))
+ assert cfg.version == V1
+ assert list(s['name'] for s in cfg.services) == ['version']
+
+ def test_wrong_version_type(self):
+ for version in [None, 1, 2, 2.0]:
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'version': version},
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Version in "filename.yml" is invalid - it should be a string.' \
+ in excinfo.exconly()
+
+ def test_unsupported_version(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'version': '2.18'},
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Version in "filename.yml" is unsupported' in excinfo.exconly()
+ assert VERSION_EXPLANATION in excinfo.exconly()
+
+ def test_version_1_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '1',
+ 'web': {'image': 'busybox'},
+ },
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Version in "filename.yml" is invalid' in excinfo.exconly()
+ assert VERSION_EXPLANATION in excinfo.exconly()
+
+ def test_v1_file_with_version_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'web': {'image': 'busybox'},
+ },
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Invalid top-level property "web"' in excinfo.exconly()
+ assert VERSION_EXPLANATION in excinfo.exconly()
+
+ def test_named_volume_config_empty(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'simple': None,
+ 'other': {},
+ }
+ })
+ config_result = config.load(config_details)
+ volumes = config_result.volumes
+ assert 'simple' in volumes
+ assert volumes['simple'] == {}
+ assert volumes['other'] == {}
+
+ def test_named_volume_numeric_driver_opt(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'simple': {'driver_opts': {'size': 42}},
+ }
+ })
+ cfg = config.load(config_details)
+ assert cfg.volumes['simple']['driver_opts']['size'] == '42'
+
+ def test_volume_invalid_driver_opt(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'simple': {'driver_opts': {'size': True}},
+ }
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert 'driver_opts.size contains an invalid type' in exc.exconly()
+
+ def test_named_volume_invalid_type_list(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': []
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "volume must be a mapping, not an array" in exc.exconly()
+
+ def test_networks_invalid_type_list(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'networks': []
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "network must be a mapping, not an array" in exc.exconly()
+
+ def test_load_service_with_name_version(self):
+ with mock.patch('compose.config.config.log') as mock_logging:
+ config_data = config.load(
+ build_config_details({
+ 'version': {
+ 'image': 'busybox'
+ }
+ }, 'working_dir', 'filename.yml')
+ )
+
+ assert 'Unexpected type for "version" key in "filename.yml"' \
+ in mock_logging.warn.call_args[0][0]
+
+ service_dicts = config_data.services
+ self.assertEqual(
+ service_sort(service_dicts),
+ service_sort([
+ {
+ 'name': 'version',
+ 'image': 'busybox',
+ }
+ ])
+ )
+
+ def test_load_throws_error_when_not_dict(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {'web': 'busybox:latest'},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ def test_load_throws_error_when_not_dict_v2(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {'version': '2', 'services': {'web': 'busybox:latest'}},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ def test_load_throws_error_with_invalid_network_fields(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {'web': 'busybox:latest'},
+ 'networks': {
+ 'invalid': {'foo', 'bar'}
+ }
+ }, 'working_dir', 'filename.yml')
+ )
+
+ def test_load_config_link_local_ips_network(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': str(V2_1),
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'networks': {
+ 'foobar': {
+ 'aliases': ['foo', 'bar'],
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+ }
+ },
+ 'networks': {'foobar': {}}
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ web_service = config.load(details).services[0]
+ assert web_service['networks'] == {
+ 'foobar': {
+ 'aliases': ['foo', 'bar'],
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+
+ def test_load_config_service_labels(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'labels': ['label_key=label_val']
+ },
+ 'db': {
+ 'image': 'example/db',
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ service_dicts = config.load(details).services
+ for service in service_dicts:
+ assert service['labels'] == {
+ 'label_key': 'label_val'
+ }
+
+ def test_load_config_volume_and_network_labels(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ },
+ },
+ 'networks': {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ 'volumes': {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ loaded_config = config.load(details)
+
+ assert loaded_config.networks == {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+
+ assert loaded_config.volumes == {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+
+ def test_load_config_invalid_service_names(self):
+ for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {invalid_name: {'image': 'busybox'}}))
+ assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly()
+
+ def test_load_config_invalid_service_names_v2(self):
+ for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'version': '2',
+ 'services': {invalid_name: {'image': 'busybox'}},
+ }))
+ assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly()
+
+ def test_load_with_invalid_field_name(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {'image': 'busybox', 'name': 'bogus'},
+ }
+ },
+ 'working_dir',
+ 'filename.yml',
+ ))
+
+ assert "Unsupported config option for services.web: 'name'" in exc.exconly()
+
+ def test_load_with_invalid_field_name_v1(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {'image': 'busybox', 'name': 'bogus'},
+ },
+ 'working_dir',
+ 'filename.yml',
+ ))
+
+ assert "Unsupported config option for web: 'name'" in exc.exconly()
+
+ def test_load_invalid_service_definition(self):
+ config_details = build_config_details(
+ {'web': 'wrong'},
+ 'working_dir',
+ 'filename.yml')
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "service 'web' must be a mapping not a string." in exc.exconly()
+
+ def test_load_with_empty_build_args(self):
+ config_details = build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'args': None,
+ },
+ },
+ },
+ }
+ )
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert (
+ "services.web.build.args contains an invalid type, it should be an "
+ "object, or an array" in exc.exconly()
+ )
+
+ def test_config_integer_service_name_raise_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {1: {'image': 'busybox'}},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert (
+ "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'" in
+ excinfo.exconly()
+ )
+
+ def test_config_integer_service_name_raise_validation_error_v2(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {1: {'image': 'busybox'}}
+ },
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert (
+ "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'." in
+ excinfo.exconly()
+ )
+
+ def test_config_invalid_service_name_raise_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'test_app': {'build': '.'},
+ 'mong\\o': {'image': 'mongo'},
+ }
+ })
+ )
+
+ assert 'Invalid service name \'mong\\o\'' in excinfo.exconly()
+
+ def test_config_duplicate_cache_from_values_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'test': {'build': {'context': '.', 'cache_from': ['a', 'b', 'a']}}
+ }
+
+ })
+ )
+
+ assert 'build.cache_from contains non-unique items' in exc.exconly()
+
+ def test_load_with_multiple_files_v1(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'web': {
+ 'image': 'example/web',
+ 'links': ['db'],
+ },
+ 'db': {
+ 'image': 'example/db',
+ },
+ })
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'web': {
+ 'build': '/',
+ 'volumes': ['/home/user/project:/code'],
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'build': {'context': os.path.abspath('/')},
+ 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
+ 'links': ['db'],
+ },
+ {
+ 'name': 'db',
+ 'image': 'example/db',
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_with_multiple_files_and_empty_override(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {'web': {'image': 'example/web'}})
+ override_file = config.ConfigFile('override.yml', None)
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ error_msg = "Top level object in 'override.yml' needs to be an object"
+ assert error_msg in exc.exconly()
+
+ def test_load_with_multiple_files_and_empty_override_v2(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {'version': '2', 'services': {'web': {'image': 'example/web'}}})
+ override_file = config.ConfigFile('override.yml', None)
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ error_msg = "Top level object in 'override.yml' needs to be an object"
+ assert error_msg in exc.exconly()
+
+ def test_load_with_multiple_files_and_empty_base(self):
+ base_file = config.ConfigFile('base.yml', None)
+ override_file = config.ConfigFile(
+ 'override.yml',
+ {'web': {'image': 'example/web'}})
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ assert "Top level object in 'base.yml' needs to be an object" in exc.exconly()
+
+ def test_load_with_multiple_files_and_empty_base_v2(self):
+ base_file = config.ConfigFile('base.yml', None)
+ override_file = config.ConfigFile(
+ 'override.tml',
+ {'version': '2', 'services': {'web': {'image': 'example/web'}}}
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ assert "Top level object in 'base.yml' needs to be an object" in exc.exconly()
+
+ def test_load_with_multiple_files_and_extends_in_override_file(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'web': {'image': 'example/web'},
+ })
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'web': {
+ 'extends': {
+ 'file': 'common.yml',
+ 'service': 'base',
+ },
+ 'volumes': ['/home/user/project:/code'],
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ tmpdir = py.test.ensuretemp('config_test')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('common.yml').write("""
+ base:
+ labels: ['label=one']
+ """)
+ with tmpdir.as_cwd():
+ service_dicts = config.load(details).services
+
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
+ 'labels': {'label': 'one'},
+ },
+ ]
+ self.assertEqual(service_sort(service_dicts), service_sort(expected))
+
+ def test_load_mixed_extends_resolution(self):
+ main_file = config.ConfigFile(
+ 'main.yml', {
+ 'version': '2.2',
+ 'services': {
+ 'prodweb': {
+ 'extends': {
+ 'service': 'web',
+ 'file': 'base.yml'
+ },
+ 'environment': {'PROD': 'true'},
+ },
+ },
+ }
+ )
+
+ tmpdir = pytest.ensuretemp('config_test')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('base.yml').write("""
+ version: '2.2'
+ services:
+ base:
+ image: base
+ web:
+ extends: base
+ """)
+
+ details = config.ConfigDetails('.', [main_file])
+ with tmpdir.as_cwd():
+ service_dicts = config.load(details).services
+ assert service_dicts[0] == {
+ 'name': 'prodweb',
+ 'image': 'base',
+ 'environment': {'PROD': 'true'},
+ }
+
+ def test_load_with_multiple_files_and_invalid_override(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {'web': {'image': 'example/web'}})
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {'bogus': 'thing'})
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ assert "service 'bogus' must be a mapping not a string." in exc.exconly()
+ assert "In file 'override.yaml'" in exc.exconly()
+
+ def test_load_sorts_in_dependency_order(self):
+ config_details = build_config_details({
+ 'web': {
+ 'image': 'busybox:latest',
+ 'links': ['db'],
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'volumes_from': ['volume:ro']
+ },
+ 'volume': {
+ 'image': 'busybox:latest',
+ 'volumes': ['/tmp'],
+ }
+ })
+ services = config.load(config_details).services
+
+ assert services[0]['name'] == 'volume'
+ assert services[1]['name'] == 'db'
+ assert services[2]['name'] == 'web'
+
+ def test_load_with_extensions(self):
+ config_details = build_config_details({
+ 'version': '2.3',
+ 'x-data': {
+ 'lambda': 3,
+ 'excess': [True, {}]
+ }
+ })
+
+ config_data = config.load(config_details)
+ assert config_data.services == []
+
+ def test_config_build_configuration(self):
+ service = config.load(
+ build_config_details(
+ {'web': {
+ 'build': '.',
+ 'dockerfile': 'Dockerfile-alt'
+ }},
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services
+ self.assertTrue('context' in service[0]['build'])
+ self.assertEqual(service[0]['build']['dockerfile'], 'Dockerfile-alt')
+
+ def test_config_build_configuration_v2(self):
+ # service.dockerfile is invalid in v2
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': '.',
+ 'dockerfile': 'Dockerfile-alt'
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ service = config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': '.'
+ }
+ }
+ }, 'tests/fixtures/extends', 'filename.yml')
+ ).services[0]
+ self.assertTrue('context' in service['build'])
+
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt'
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services
+ self.assertTrue('context' in service[0]['build'])
+ self.assertEqual(service[0]['build']['dockerfile'], 'Dockerfile-alt')
+
+ def test_load_with_buildargs(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'args': {
+ 'opt1': 42,
+ 'opt2': 'foobar'
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'args' in service['build']
+ assert 'opt1' in service['build']['args']
+ assert isinstance(service['build']['args']['opt1'], str)
+ assert service['build']['args']['opt1'] == '42'
+ assert service['build']['args']['opt2'] == 'foobar'
+
+ def test_load_build_labels_dict(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': str(V3_3),
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'labels': {
+ 'label1': 42,
+ 'label2': 'foobar'
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'labels' in service['build']
+ assert 'label1' in service['build']['labels']
+ assert service['build']['labels']['label1'] == 42
+ assert service['build']['labels']['label2'] == 'foobar'
+
+ def test_load_build_labels_list(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'labels': ['foo=bar', 'baz=true', 'foobar=1']
+ },
+ },
+ },
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ service = config.load(details).services[0]
+ assert service['build']['labels'] == {
+ 'foo': 'bar', 'baz': 'true', 'foobar': '1'
+ }
+
+ def test_build_args_allow_empty_properties(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'args': {
+ 'foo': None
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'args' in service['build']
+ assert 'foo' in service['build']['args']
+ assert service['build']['args']['foo'] == ''
+
+ # If build argument is None then it will be converted to the empty
+ # string. Make sure that int zero kept as it is, i.e. not converted to
+ # the empty string
+ def test_build_args_check_zero_preserved(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'args': {
+ 'foo': 0
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'args' in service['build']
+ assert 'foo' in service['build']['args']
+ assert service['build']['args']['foo'] == '0'
+
+ def test_load_with_multiple_files_mismatched_networks_format(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'networks': {
+ 'foobar': {'aliases': ['foo', 'bar']}
+ }
+ }
+ },
+ 'networks': {'foobar': {}, 'baz': {}}
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'networks': ['baz']
+ }
+ }
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file, override_file])
+ web_service = config.load(details).services[0]
+ assert web_service['networks'] == {
+ 'foobar': {'aliases': ['foo', 'bar']},
+ 'baz': None
+ }
+
+ def test_load_with_multiple_files_v2(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'depends_on': ['db'],
+ },
+ 'db': {
+ 'image': 'example/db',
+ }
+ },
+ })
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': '/',
+ 'volumes': ['/home/user/project:/code'],
+ 'depends_on': ['other'],
+ },
+ 'other': {
+ 'image': 'example/other',
+ }
+ }
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'build': {'context': os.path.abspath('/')},
+ 'image': 'example/web',
+ 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
+ 'depends_on': {
+ 'db': {'condition': 'service_started'},
+ 'other': {'condition': 'service_started'},
+ },
+ },
+ {
+ 'name': 'db',
+ 'image': 'example/db',
+ },
+ {
+ 'name': 'other',
+ 'image': 'example/other',
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ @mock.patch.dict(os.environ)
+ def test_load_with_multiple_files_v3_2(self):
+ os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true'
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'volumes': [
+ {'source': '/a', 'target': '/b', 'type': 'bind'},
+ {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True}
+ ]
+ }
+ },
+ 'volumes': {'vol': {}}
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'volumes': ['/c:/b', '/anonymous']
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes'])
+ assert sorted(svc_volumes) == sorted(
+ ['/anonymous', '/c:/b:rw', 'vol:/x:ro']
+ )
+
+ @mock.patch.dict(os.environ)
+ def test_volume_mode_override(self):
+ os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true'
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'volumes': ['/c:/b:rw']
+ }
+ },
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'volumes': ['/c:/b:ro']
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ svc_volumes = list(map(lambda v: v.repr(), service_dicts[0]['volumes']))
+ assert svc_volumes == ['/c:/b:ro']
+
+ def test_undeclared_volume_v2(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': ['data0028:/data:ro'],
+ },
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ with self.assertRaises(ConfigurationError):
+ config.load(details)
+
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': ['./data0028:/data:ro'],
+ },
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(details)
+ volume = config_data.services[0].get('volumes')[0]
+ assert not volume.is_named_volume
+
+ def test_undeclared_volume_v1(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': ['data0028:/data:ro'],
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(details)
+ volume = config_data.services[0].get('volumes')[0]
+ assert volume.external == 'data0028'
+ assert volume.is_named_volume
+
+ def test_config_valid_service_names(self):
+ for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
+ services = config.load(
+ build_config_details(
+ {valid_name: {'image': 'busybox'}},
+ 'tests/fixtures/extends',
+ 'common.yml')).services
+ assert services[0]['name'] == valid_name
+
+ def test_config_hint(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'privilige': 'something'},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "(did you mean 'privileged'?)" in excinfo.exconly()
+
+ def test_load_errors_on_uppercase_with_no_image(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'Foo': {'build': '.'},
+ }, 'tests/fixtures/build-ctx'))
+ assert "Service 'Foo' contains uppercase characters" in exc.exconly()
+
+ def test_invalid_config_v1(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 1},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo.image contains an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_invalid_config_v2(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'foo': {'image': 1},
+ },
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "services.foo.image contains an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_invalid_config_build_and_image_specified_v1(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'build': '.'},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo has both an image and build path specified." in excinfo.exconly()
+
+ def test_invalid_config_type_should_be_an_array(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'links': 'an_link'},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo.links contains an invalid type, it should be an array" \
+ in excinfo.exconly()
+
+ def test_invalid_config_not_a_dictionary(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ ['foo', 'lol'],
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "Top level object in 'filename.yml' needs to be an object" \
+ in excinfo.exconly()
+
+ def test_invalid_config_not_unique_items(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'build': '.', 'devices': ['/dev/foo:/dev/foo', '/dev/foo:/dev/foo']}
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "has non-unique elements" in excinfo.exconly()
+
+ def test_invalid_list_of_strings_format(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'build': '.', 'command': [1]}
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.command contains 1, which is an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_load_config_dockerfile_without_build_raises_error_v1(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'dockerfile': 'Dockerfile.alt'
+ }
+ }))
+
+ assert "web has both an image and alternate Dockerfile." in exc.exconly()
+
+ def test_config_extra_hosts_string_raises_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'extra_hosts': 'somehost:162.242.195.82'
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extra_hosts contains an invalid type" \
+ in excinfo.exconly()
+
+ def test_config_extra_hosts_list_of_dicts_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'extra_hosts': [
+ {'somehost': '162.242.195.82'},
+ {'otherhost': '50.31.209.229'}
+ ]
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extra_hosts contains {\"somehost\": \"162.242.195.82\"}, " \
+ "which is an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_config_ulimits_invalid_keys_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'ulimits': {
+ 'nofile': {
+ "not_soft_or_hard": 100,
+ "soft": 10000,
+ "hard": 20000,
+ }
+ }
+ }
+ },
+ 'working_dir',
+ 'filename.yml'))
+
+ assert "web.ulimits.nofile contains unsupported option: 'not_soft_or_hard'" \
+ in exc.exconly()
+
+ def test_config_ulimits_required_keys_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'ulimits': {'nofile': {"soft": 10000}}
+ }
+ },
+ 'working_dir',
+ 'filename.yml'))
+ assert "web.ulimits.nofile" in exc.exconly()
+ assert "'hard' is a required property" in exc.exconly()
+
+ def test_config_ulimits_soft_greater_than_hard_error(self):
+ expected = "'soft' value can not be greater than 'hard' value"
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'ulimits': {
+ 'nofile': {"soft": 10000, "hard": 1000}
+ }
+ }
+ },
+ 'working_dir',
+ 'filename.yml'))
+ assert expected in exc.exconly()
+
+ def test_valid_config_which_allows_two_type_definitions(self):
+ expose_values = [["8000"], [8000]]
+ for expose in expose_values:
+ service = config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'expose': expose
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ ).services
+ self.assertEqual(service[0]['expose'], expose)
+
+ def test_valid_config_oneof_string_or_list(self):
+ entrypoint_values = [["sh"], "sh"]
+ for entrypoint in entrypoint_values:
+ service = config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'entrypoint': entrypoint
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ ).services
+ self.assertEqual(service[0]['entrypoint'], entrypoint)
+
+ def test_logs_warning_for_boolean_in_environment(self):
+ config_details = build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'environment': {'SHOW_STUFF': True}
+ }
+ })
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+
+ assert "contains true, which is an invalid type" in exc.exconly()
+
+ def test_config_valid_environment_dict_key_contains_dashes(self):
+ services = config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'environment': {'SPRING_JPA_HIBERNATE_DDL-AUTO': 'none'}
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ ).services
+ self.assertEqual(services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'], 'none')
+
+ def test_load_yaml_with_yaml_error(self):
+ tmpdir = py.test.ensuretemp('invalid_yaml_test')
+ self.addCleanup(tmpdir.remove)
+ invalid_yaml_file = tmpdir.join('docker-compose.yml')
+ invalid_yaml_file.write("""
+ web:
+ this is bogus: ok: what
+ """)
+ with pytest.raises(ConfigurationError) as exc:
+ config.load_yaml(str(invalid_yaml_file))
+
+ assert 'line 3, column 32' in exc.exconly()
+
+ def test_validate_extra_hosts_invalid(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'web': {
+ 'image': 'alpine',
+ 'extra_hosts': "www.example.com: 192.168.0.17",
+ }
+ }))
+ assert "web.extra_hosts contains an invalid type" in exc.exconly()
+
+ def test_validate_extra_hosts_invalid_list(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'web': {
+ 'image': 'alpine',
+ 'extra_hosts': [
+ {'www.example.com': '192.168.0.17'},
+ {'api.example.com': '192.168.0.18'}
+ ],
+ }
+ }))
+ assert "which is an invalid type" in exc.exconly()
+
+ def test_normalize_dns_options(self):
+ actual = config.load(build_config_details({
+ 'web': {
+ 'image': 'alpine',
+ 'dns': '8.8.8.8',
+ 'dns_search': 'domain.local',
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'dns': ['8.8.8.8'],
+ 'dns_search': ['domain.local'],
+ }
+ ]
+
+ def test_tmpfs_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'tmpfs': '/run',
+ }
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'tmpfs': ['/run'],
+ }
+ ]
+
+ def test_oom_score_adj_option(self):
+
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'oom_score_adj': 500
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'oom_score_adj': 500
+ }
+ ]
+
+ def test_swappiness_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'mem_swappiness': 10,
+ }
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'mem_swappiness': 10,
+ }
+ ]
+
+ def test_group_add_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'group_add': ["docker", 777]
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'group_add': ["docker", 777]
+ }
+ ]
+
+ def test_dns_opt_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'dns_opt': ["use-vc", "no-tld-query"]
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'dns_opt': ["use-vc", "no-tld-query"]
+ }
+ ]
+
+ def test_isolation_option(self):
+ actual = config.load(build_config_details({
+ 'version': str(V2_1),
+ 'services': {
+ 'web': {
+ 'image': 'win10',
+ 'isolation': 'hyperv'
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'win10',
+ 'isolation': 'hyperv',
+ }
+ ]
+
+ def test_merge_service_dicts_from_files_with_extends_in_base(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'app'}
+ }
+ override = {
+ 'image': 'alpine:edge',
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'app'}
+ }
+
+ def test_merge_service_dicts_from_files_with_extends_in_override(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'app'}
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'extends': {'service': 'foo'}
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'foo'}
+ }
+
+ def test_merge_service_dicts_heterogeneous(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'ports': ['5432']
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': [5432]
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'ports': types.ServicePort.parse('5432')
+ }
+
+ def test_merge_service_dicts_heterogeneous_2(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'ports': [5432]
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': ['5432']
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'ports': types.ServicePort.parse('5432')
+ }
+
+ def test_merge_service_dicts_ports_sorting(self):
+ base = {
+ 'ports': [5432]
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': ['5432/udp']
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert len(actual['ports']) == 2
+ assert types.ServicePort.parse('5432')[0] in actual['ports']
+ assert types.ServicePort.parse('5432/udp')[0] in actual['ports']
+
+ def test_merge_service_dicts_heterogeneous_volumes(self):
+ base = {
+ 'volumes': ['/a:/b', '/x:/z'],
+ }
+
+ override = {
+ 'image': 'alpine:edge',
+ 'volumes': [
+ {'source': '/e', 'target': '/b', 'type': 'bind'},
+ {'source': '/c', 'target': '/d', 'type': 'bind'}
+ ]
+ }
+
+ actual = config.merge_service_dicts_from_files(
+ base, override, V3_2
+ )
+
+ assert actual['volumes'] == [
+ {'source': '/e', 'target': '/b', 'type': 'bind'},
+ {'source': '/c', 'target': '/d', 'type': 'bind'},
+ '/x:/z'
+ ]
+
+ def test_merge_logging_v1(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'log_driver': 'something',
+ 'log_opt': {'foo': 'three'},
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'command': 'true',
+ }
+ actual = config.merge_service_dicts(base, override, V1)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'log_driver': 'something',
+ 'log_opt': {'foo': 'three'},
+ 'command': 'true',
+ }
+
+ def test_merge_logging_v2(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_override_driver(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_base_driver(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_drivers(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_override_options(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'syslog'
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ }
+ }
+
+ def test_merge_logging_v2_no_base(self):
+ base = {
+ 'image': 'alpine:edge'
+ }
+ override = {
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_override(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+ override = {}
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+
+ def test_merge_mixed_ports(self):
+ base = {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': [
+ {
+ 'target': '1245',
+ 'published': '1245',
+ 'protocol': 'udp',
+ }
+ ]
+ }
+
+ override = {
+ 'ports': ['1245:1245/udp']
+ }
+
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert actual == {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': [types.ServicePort('1245', '1245', 'udp', None, None)]
+ }
+
+ def test_merge_depends_on_no_override(self):
+ base = {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'}
+ }
+ }
+ override = {}
+ actual = config.merge_service_dicts(base, override, V2_1)
+ assert actual == base
+
+ def test_merge_depends_on_mixed_syntax(self):
+ base = {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'}
+ }
+ }
+ override = {
+ 'depends_on': ['app3']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_1)
+ assert actual == {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'},
+ 'app3': {'condition': 'service_started'}
+ }
+ }
+
+ def test_empty_environment_key_allowed(self):
+ service_dict = config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'build': '.',
+ 'environment': {
+ 'POSTGRES_PASSWORD': ''
+ },
+ },
+ },
+ '.',
+ None,
+ )
+ ).services[0]
+ self.assertEqual(service_dict['environment']['POSTGRES_PASSWORD'], '')
+
+ def test_merge_pid(self):
+ # Regression: https://github.com/docker/compose/issues/4184
+ base = {
+ 'image': 'busybox',
+ 'pid': 'host'
+ }
+
+ override = {
+ 'labels': {'com.docker.compose.test': 'yes'}
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'busybox',
+ 'pid': 'host',
+ 'labels': {'com.docker.compose.test': 'yes'}
+ }
+
+ def test_merge_different_secrets(self):
+ base = {
+ 'image': 'busybox',
+ 'secrets': [
+ {'source': 'src.txt'}
+ ]
+ }
+ override = {'secrets': ['other-src.txt']}
+
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert secret_sort(actual['secrets']) == secret_sort([
+ {'source': 'src.txt'},
+ {'source': 'other-src.txt'}
+ ])
+
+ def test_merge_secrets_override(self):
+ base = {
+ 'image': 'busybox',
+ 'secrets': ['src.txt'],
+ }
+ override = {
+ 'secrets': [
+ {
+ 'source': 'src.txt',
+ 'target': 'data.txt',
+ 'mode': 0o400
+ }
+ ]
+ }
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert actual['secrets'] == override['secrets']
+
+ def test_merge_different_configs(self):
+ base = {
+ 'image': 'busybox',
+ 'configs': [
+ {'source': 'src.txt'}
+ ]
+ }
+ override = {'configs': ['other-src.txt']}
+
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert secret_sort(actual['configs']) == secret_sort([
+ {'source': 'src.txt'},
+ {'source': 'other-src.txt'}
+ ])
+
+ def test_merge_configs_override(self):
+ base = {
+ 'image': 'busybox',
+ 'configs': ['src.txt'],
+ }
+ override = {
+ 'configs': [
+ {
+ 'source': 'src.txt',
+ 'target': 'data.txt',
+ 'mode': 0o400
+ }
+ ]
+ }
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert actual['configs'] == override['configs']
+
+ def test_merge_deploy(self):
+ base = {
+ 'image': 'busybox',
+ }
+ override = {
+ 'deploy': {
+ 'mode': 'global',
+ 'restart_policy': {
+ 'condition': 'on-failure'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V3_0)
+ assert actual['deploy'] == override['deploy']
+
+ def test_merge_deploy_override(self):
+ base = {
+ 'image': 'busybox',
+ 'deploy': {
+ 'mode': 'global',
+ 'restart_policy': {
+ 'condition': 'on-failure'
+ },
+ 'placement': {
+ 'constraints': [
+ 'node.role == manager'
+ ]
+ }
+ }
+ }
+ override = {
+ 'deploy': {
+ 'mode': 'replicated',
+ 'restart_policy': {
+ 'condition': 'any'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V3_0)
+ assert actual['deploy'] == {
+ 'mode': 'replicated',
+ 'restart_policy': {
+ 'condition': 'any'
+ },
+ 'placement': {
+ 'constraints': [
+ 'node.role == manager'
+ ]
+ }
+ }
+
+ def test_merge_credential_spec(self):
+ base = {
+ 'image': 'bb',
+ 'credential_spec': {
+ 'file': '/hello-world',
+ }
+ }
+
+ override = {
+ 'credential_spec': {
+ 'registry': 'revolution.com',
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert actual['credential_spec'] == override['credential_spec']
+
+ def test_merge_scale(self):
+ base = {
+ 'image': 'bar',
+ 'scale': 2,
+ }
+
+ override = {
+ 'scale': 4,
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_2)
+ assert actual == {'image': 'bar', 'scale': 4}
+
+ def test_merge_blkio_config(self):
+ base = {
+ 'image': 'bar',
+ 'blkio_config': {
+ 'weight': 300,
+ 'weight_device': [
+ {'path': '/dev/sda1', 'weight': 200}
+ ],
+ 'device_read_iops': [
+ {'path': '/dev/sda1', 'rate': 300}
+ ],
+ 'device_write_iops': [
+ {'path': '/dev/sda1', 'rate': 1000}
+ ]
+ }
+ }
+
+ override = {
+ 'blkio_config': {
+ 'weight': 450,
+ 'weight_device': [
+ {'path': '/dev/sda2', 'weight': 400}
+ ],
+ 'device_read_iops': [
+ {'path': '/dev/sda1', 'rate': 2000}
+ ],
+ 'device_read_bps': [
+ {'path': '/dev/sda1', 'rate': 1024}
+ ]
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_2)
+ assert actual == {
+ 'image': 'bar',
+ 'blkio_config': {
+ 'weight': override['blkio_config']['weight'],
+ 'weight_device': (
+ base['blkio_config']['weight_device'] +
+ override['blkio_config']['weight_device']
+ ),
+ 'device_read_iops': override['blkio_config']['device_read_iops'],
+ 'device_read_bps': override['blkio_config']['device_read_bps'],
+ 'device_write_iops': base['blkio_config']['device_write_iops']
+ }
+ }
+
+ def test_merge_extra_hosts(self):
+ base = {
+ 'image': 'bar',
+ 'extra_hosts': {
+ 'foo': '1.2.3.4',
+ }
+ }
+
+ override = {
+ 'extra_hosts': ['bar:5.6.7.8', 'foo:127.0.0.1']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual['extra_hosts'] == {
+ 'foo': '127.0.0.1',
+ 'bar': '5.6.7.8',
+ }
+
+ def test_merge_healthcheck_config(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'test': ['true']
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'interval': 5000,
+ 'timeout': 10000,
+ 'test': ['echo', 'OK'],
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == {
+ 'start_period': base['healthcheck']['start_period'],
+ 'test': override['healthcheck']['test'],
+ 'interval': override['healthcheck']['interval'],
+ 'timeout': override['healthcheck']['timeout'],
+ }
+
+ def test_merge_healthcheck_override_disables(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'timeout': 2000,
+ 'retries': 3,
+ 'test': ['true']
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'disabled': True
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == {'disabled': True}
+
+ def test_merge_healthcheck_override_enables(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'disabled': True
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'disabled': False,
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'timeout': 2000,
+ 'retries': 3,
+ 'test': ['true']
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == override['healthcheck']
+
+ def test_external_volume_config(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'bogus': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'ext': {'external': True},
+ 'ext2': {'external': {'name': 'aliased'}}
+ }
+ })
+ config_result = config.load(config_details)
+ volumes = config_result.volumes
+ assert 'ext' in volumes
+ assert volumes['ext']['external'] is True
+ assert 'ext2' in volumes
+ assert volumes['ext2']['external']['name'] == 'aliased'
+
+ def test_external_volume_invalid_config(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'bogus': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'ext': {'external': True, 'driver': 'foo'}
+ }
+ })
+ with pytest.raises(ConfigurationError):
+ config.load(config_details)
+
+ def test_depends_on_orders_services(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'one': {'image': 'busybox', 'depends_on': ['three', 'two']},
+ 'two': {'image': 'busybox', 'depends_on': ['three']},
+ 'three': {'image': 'busybox'},
+ },
+ })
+ actual = config.load(config_details)
+ assert (
+ [service['name'] for service in actual.services] ==
+ ['three', 'two', 'one']
+ )
+
+ def test_depends_on_unknown_service_errors(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'one': {'image': 'busybox', 'depends_on': ['three']},
+ },
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "Service 'one' depends on service 'three'" in exc.exconly()
+
+ def test_linked_service_is_undefined(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {'image': 'busybox', 'links': ['db:db']},
+ },
+ })
+ )
+
+ def test_load_dockerfile_without_context(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'one': {'build': {'dockerfile': 'Dockerfile.foo'}},
+ },
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert 'has neither an image nor a build context' in exc.exconly()
+
    def test_load_secrets(self):
        # Secrets can be listed in short form (a bare name) or long form
        # (source/target/uid/gid/mode); both load into ServiceSecret tuples.
        base_file = config.ConfigFile(
            'base.yaml',
            {
                'version': '3.1',
                'services': {
                    'web': {
                        'image': 'example/web',
                        'secrets': [
                            'one',
                            {
                                'source': 'source',
                                'target': 'target',
                                'uid': '100',
                                'gid': '200',
                                'mode': 0o777,
                            },
                        ],
                    },
                },
                'secrets': {
                    'one': {'file': 'secret.txt'},
                },
            })
        details = config.ConfigDetails('.', [base_file])
        service_dicts = config.load(details).services
        expected = [
            {
                'name': 'web',
                'image': 'example/web',
                'secrets': [
                    # Short form: only the source name is populated.
                    types.ServiceSecret('one', None, None, None, None),
                    types.ServiceSecret('source', 'target', '100', '200', 0o777),
                ],
            },
        ]
        assert service_sort(service_dicts) == service_sort(expected)
+
    def test_load_secrets_multi_file(self):
        # Secrets declared across a base file and an override file should
        # accumulate on the merged service.
        base_file = config.ConfigFile(
            'base.yaml',
            {
                'version': '3.1',
                'services': {
                    'web': {
                        'image': 'example/web',
                        'secrets': ['one'],
                    },
                },
                'secrets': {
                    'one': {'file': 'secret.txt'},
                },
            })
        # NOTE(review): the override reuses the name 'base.yaml' — looks like
        # copy-paste; presumably harmless since the name is metadata. Confirm.
        override_file = config.ConfigFile(
            'base.yaml',
            {
                'version': '3.1',
                'services': {
                    'web': {
                        'secrets': [
                            {
                                'source': 'source',
                                'target': 'target',
                                'uid': '100',
                                'gid': '200',
                                'mode': 0o777,
                            },
                        ],
                    },
                },
            })
        details = config.ConfigDetails('.', [base_file, override_file])
        service_dicts = config.load(details).services
        expected = [
            {
                'name': 'web',
                'image': 'example/web',
                'secrets': [
                    types.ServiceSecret('one', None, None, None, None),
                    types.ServiceSecret('source', 'target', '100', '200', 0o777),
                ],
            },
        ]
        assert service_sort(service_dicts) == service_sort(expected)
+
    def test_load_configs(self):
        # Mirrors test_load_secrets for the 'configs' section (schema 3.3):
        # short and long forms both load into ServiceConfig tuples.
        base_file = config.ConfigFile(
            'base.yaml',
            {
                'version': '3.3',
                'services': {
                    'web': {
                        'image': 'example/web',
                        'configs': [
                            'one',
                            {
                                'source': 'source',
                                'target': 'target',
                                'uid': '100',
                                'gid': '200',
                                'mode': 0o777,
                            },
                        ],
                    },
                },
                'configs': {
                    'one': {'file': 'secret.txt'},
                },
            })
        details = config.ConfigDetails('.', [base_file])
        service_dicts = config.load(details).services
        expected = [
            {
                'name': 'web',
                'image': 'example/web',
                'configs': [
                    types.ServiceConfig('one', None, None, None, None),
                    types.ServiceConfig('source', 'target', '100', '200', 0o777),
                ],
            },
        ]
        assert service_sort(service_dicts) == service_sort(expected)
+
    def test_load_configs_multi_file(self):
        # Mirrors test_load_secrets_multi_file for the 'configs' section:
        # entries from base and override files accumulate after merging.
        base_file = config.ConfigFile(
            'base.yaml',
            {
                'version': '3.3',
                'services': {
                    'web': {
                        'image': 'example/web',
                        'configs': ['one'],
                    },
                },
                'configs': {
                    'one': {'file': 'secret.txt'},
                },
            })
        # NOTE(review): override file name duplicates 'base.yaml' — likely
        # copy-paste; presumed harmless metadata. Confirm.
        override_file = config.ConfigFile(
            'base.yaml',
            {
                'version': '3.3',
                'services': {
                    'web': {
                        'configs': [
                            {
                                'source': 'source',
                                'target': 'target',
                                'uid': '100',
                                'gid': '200',
                                'mode': 0o777,
                            },
                        ],
                    },
                },
            })
        details = config.ConfigDetails('.', [base_file, override_file])
        service_dicts = config.load(details).services
        expected = [
            {
                'name': 'web',
                'image': 'example/web',
                'configs': [
                    types.ServiceConfig('one', None, None, None, None),
                    types.ServiceConfig('source', 'target', '100', '200', 0o777),
                ],
            },
        ]
        assert service_sort(service_dicts) == service_sort(expected)
+
+
class NetworkModeTest(unittest.TestCase):
    """Parsing of network_mode (v2) / net (v1): bridge, container: and
    service: forms, plus validation of invalid combinations."""

    def test_network_mode_standard(self):
        config_data = config.load(build_config_details({
            'version': '2',
            'services': {
                'web': {
                    'image': 'busybox',
                    'command': "top",
                    'network_mode': 'bridge',
                },
            },
        }))

        assert config_data.services[0]['network_mode'] == 'bridge'

    def test_network_mode_standard_v1(self):
        # v1 spells the option 'net'; on load it is renamed 'network_mode'.
        config_data = config.load(build_config_details({
            'web': {
                'image': 'busybox',
                'command': "top",
                'net': 'bridge',
            },
        }))

        assert config_data.services[0]['network_mode'] == 'bridge'
        assert 'net' not in config_data.services[0]

    def test_network_mode_container(self):
        config_data = config.load(build_config_details({
            'version': '2',
            'services': {
                'web': {
                    'image': 'busybox',
                    'command': "top",
                    # 'foo' names a container, not a service, so the value
                    # passes through untranslated.
                    'network_mode': 'container:foo',
                },
            },
        }))

        assert config_data.services[0]['network_mode'] == 'container:foo'

    def test_network_mode_container_v1(self):
        config_data = config.load(build_config_details({
            'web': {
                'image': 'busybox',
                'command': "top",
                'net': 'container:foo',
            },
        }))

        assert config_data.services[0]['network_mode'] == 'container:foo'

    def test_network_mode_service(self):
        config_data = config.load(build_config_details({
            'version': '2',
            'services': {
                'web': {
                    'image': 'busybox',
                    'command': "top",
                    'network_mode': 'service:foo',
                },
                'foo': {
                    'image': 'busybox',
                    'command': "top",
                },
            },
        }))

        # The service under test ('web') ends up at index 1 once both
        # services are loaded.
        assert config_data.services[1]['network_mode'] == 'service:foo'

    def test_network_mode_service_v1(self):
        # In v1, 'net: container:<name>' naming another *service* in the
        # same file is translated to the service: form (per the assertion
        # below).
        config_data = config.load(build_config_details({
            'web': {
                'image': 'busybox',
                'command': "top",
                'net': 'container:foo',
            },
            'foo': {
                'image': 'busybox',
                'command': "top",
            },
        }))

        assert config_data.services[1]['network_mode'] == 'service:foo'

    def test_network_mode_service_nonexistent(self):
        # service: mode must reference a service defined in the file.
        with pytest.raises(ConfigurationError) as excinfo:
            config.load(build_config_details({
                'version': '2',
                'services': {
                    'web': {
                        'image': 'busybox',
                        'command': "top",
                        'network_mode': 'service:foo',
                    },
                },
            }))

        assert "service 'foo' which is undefined" in excinfo.exconly()

    def test_network_mode_plus_networks_is_invalid(self):
        # network_mode and networks are mutually exclusive on a service.
        with pytest.raises(ConfigurationError) as excinfo:
            config.load(build_config_details({
                'version': '2',
                'services': {
                    'web': {
                        'image': 'busybox',
                        'command': "top",
                        'network_mode': 'bridge',
                        'networks': ['front'],
                    },
                },
                'networks': {
                    'front': None,
                }
            }))

        assert "'network_mode' and 'networks' cannot be combined" in excinfo.exconly()
+
+
class PortsTest(unittest.TestCase):
    """Validation of 'ports' and 'expose' entries.

    The class-level fixtures are shared between both options: single
    ports are valid for both, while host:container mappings are only
    valid under 'ports'.
    """

    # Values that are not a list of port entries at all.
    INVALID_PORTS_TYPES = [
        {"1": "8000"},
        False,
        "8000",
        8000,
    ]

    NON_UNIQUE_SINGLE_PORTS = [
        ["8000", "8000"],
    ]

    # Host and container ranges of different lengths.
    INVALID_PORT_MAPPINGS = [
        ["8000-8004:8000-8002"],
        ["4242:4242-4244"],
    ]

    VALID_SINGLE_PORTS = [
        ["8000"],
        ["8000/tcp"],
        ["8000", "9000"],
        [8000],
        [8000, 9000],
    ]

    VALID_PORT_MAPPINGS = [
        ["8000:8050"],
        ["49153-49154:3002-3003"],
    ]

    def test_config_invalid_ports_type_validation(self):
        for invalid_ports in self.INVALID_PORTS_TYPES:
            with pytest.raises(ConfigurationError) as exc:
                self.check_config({'ports': invalid_ports})

            assert "contains an invalid type" in exc.value.msg

    def test_config_non_unique_ports_validation(self):
        for invalid_ports in self.NON_UNIQUE_SINGLE_PORTS:
            with pytest.raises(ConfigurationError) as exc:
                self.check_config({'ports': invalid_ports})

            assert "non-unique" in exc.value.msg

    def test_config_invalid_ports_format_validation(self):
        for invalid_ports in self.INVALID_PORT_MAPPINGS:
            with pytest.raises(ConfigurationError) as exc:
                self.check_config({'ports': invalid_ports})

            assert "Port ranges don't match in length" in exc.value.msg

    def test_config_valid_ports_format_validation(self):
        # Should simply not raise.
        for valid_ports in self.VALID_SINGLE_PORTS + self.VALID_PORT_MAPPINGS:
            self.check_config({'ports': valid_ports})

    def test_config_invalid_expose_type_validation(self):
        for invalid_expose in self.INVALID_PORTS_TYPES:
            with pytest.raises(ConfigurationError) as exc:
                self.check_config({'expose': invalid_expose})

            assert "contains an invalid type" in exc.value.msg

    def test_config_non_unique_expose_validation(self):
        for invalid_expose in self.NON_UNIQUE_SINGLE_PORTS:
            with pytest.raises(ConfigurationError) as exc:
                self.check_config({'expose': invalid_expose})

            assert "non-unique" in exc.value.msg

    def test_config_invalid_expose_format_validation(self):
        # Valid port mappings ARE NOT valid 'expose' entries
        for invalid_expose in self.INVALID_PORT_MAPPINGS + self.VALID_PORT_MAPPINGS:
            with pytest.raises(ConfigurationError) as exc:
                self.check_config({'expose': invalid_expose})

            assert "should be of the format" in exc.value.msg

    def test_config_valid_expose_format_validation(self):
        # Valid single ports ARE valid 'expose' entries
        for valid_expose in self.VALID_SINGLE_PORTS:
            self.check_config({'expose': valid_expose})

    def check_config(self, cfg):
        # Helper: load a minimal v2.3 config with *cfg* merged into the
        # single 'web' service; raises ConfigurationError on bad input.
        config.load(
            build_config_details({
                'version': '2.3',
                'services': {
                    'web': dict(image='busybox', **cfg)
                },
            }, 'working_dir', 'filename.yml')
        )
+
+
class InterpolationTest(unittest.TestCase):
    """Variable interpolation from the process environment and .env files.

    Assertions use plain ``assert`` / ``pytest.raises`` for consistency
    with the rest of this module (previously a mix of unittest helpers
    and pytest style).
    """

    @mock.patch.dict(os.environ)
    def test_config_file_with_environment_file(self):
        # Values come from the project's .env file via Environment.from_env_file.
        project_dir = 'tests/fixtures/default-env-file'
        service_dicts = config.load(
            config.find(
                project_dir, None, Environment.from_env_file(project_dir)
            )
        ).services

        assert service_dicts[0] == {
            'name': 'web',
            'image': 'alpine:latest',
            'ports': [
                types.ServicePort.parse('5643')[0],
                types.ServicePort.parse('9999')[0]
            ],
            'command': 'true'
        }

    @mock.patch.dict(os.environ)
    def test_config_file_with_environment_variable(self):
        project_dir = 'tests/fixtures/environment-interpolation'
        os.environ.update(
            IMAGE="busybox",
            HOST_PORT="80",
            LABEL_VALUE="myvalue",
        )

        service_dicts = config.load(
            config.find(
                project_dir, None, Environment.from_env_file(project_dir)
            )
        ).services

        assert service_dicts == [
            {
                'name': 'web',
                'image': 'busybox',
                'ports': types.ServicePort.parse('80:8000'),
                'labels': {'mylabel': 'myvalue'},
                'hostname': 'host-',
                'command': '${ESCAPED}',
            }
        ]

    @mock.patch.dict(os.environ)
    def test_unset_variable_produces_warning(self):
        os.environ.pop('FOO', None)
        os.environ.pop('BAR', None)
        config_details = build_config_details(
            {
                'web': {
                    'image': '${FOO}',
                    'command': '${BAR}',
                    'container_name': '${BAR}',
                },
            },
            '.',
            None,
        )

        with mock.patch('compose.config.environment.log') as log:
            config.load(config_details)

        # One warning per missing variable, not per occurrence
        # ($BAR is referenced twice but warned about once).
        assert log.warn.call_count == 2
        warnings = sorted(args[0][0] for args in log.warn.call_args_list)
        assert 'BAR' in warnings[0]
        assert 'FOO' in warnings[1]

    @mock.patch.dict(os.environ)
    def test_invalid_interpolation(self):
        with pytest.raises(config.ConfigurationError) as excinfo:
            config.load(
                build_config_details(
                    {'web': {'image': '${'}},
                    'working_dir',
                    'filename.yml'
                )
            )

        # The error should pinpoint option, service and offending string.
        msg = excinfo.value.msg
        assert 'Invalid' in msg
        assert 'for "image" option' in msg
        assert 'in service "web"' in msg
        assert '"${"' in msg

    @mock.patch.dict(os.environ)
    def test_interpolation_secrets_section(self):
        """Interpolation also applies inside the top-level secrets section."""
        os.environ['FOO'] = 'baz.bar'
        config_dict = config.load(build_config_details({
            'version': '3.1',
            'secrets': {
                'secretdata': {
                    'external': {'name': '$FOO'}
                }
            }
        }))
        assert config_dict.secrets == {
            'secretdata': {
                'external': {'name': 'baz.bar'},
                'external_name': 'baz.bar'
            }
        }

    @mock.patch.dict(os.environ)
    def test_interpolation_configs_section(self):
        """Interpolation also applies inside the top-level configs section."""
        os.environ['FOO'] = 'baz.bar'
        config_dict = config.load(build_config_details({
            'version': '3.3',
            'configs': {
                'configdata': {
                    'external': {'name': '$FOO'}
                }
            }
        }))
        assert config_dict.configs == {
            'configdata': {
                'external': {'name': 'baz.bar'},
                'external_name': 'baz.bar'
            }
        }
+
+
class VolumeConfigTest(unittest.TestCase):
    """Host-path expansion rules for volume strings.

    Uses plain ``assert`` for consistency with the rest of this module
    (previously ``self.assertEqual``).
    """

    def test_no_binding(self):
        # Container-only volumes need no host path handling.
        d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.')
        assert d['volumes'] == ['/data']

    @mock.patch.dict(os.environ)
    def test_volume_binding_with_environment_variable(self):
        os.environ['VOLUME_PATH'] = '/host/path'

        d = config.load(
            build_config_details(
                {'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}},
                '.',
                None,
            )
        ).services[0]
        assert d['volumes'] == [VolumeSpec.parse('/host/path:/container/path')]

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
    @mock.patch.dict(os.environ)
    def test_volume_binding_with_home(self):
        os.environ['HOME'] = '/home/user'
        d = make_service_dict('foo', {'build': '.', 'volumes': ['~:/container/path']}, working_dir='.')
        assert d['volumes'] == ['/home/user:/container/path']

    def test_name_does_not_expand(self):
        # A named volume is left alone; only paths are expanded.
        d = make_service_dict('foo', {'build': '.', 'volumes': ['mydatavolume:/data']}, working_dir='.')
        assert d['volumes'] == ['mydatavolume:/data']

    def test_absolute_posix_path_does_not_expand(self):
        d = make_service_dict('foo', {'build': '.', 'volumes': ['/var/lib/data:/data']}, working_dir='.')
        assert d['volumes'] == ['/var/lib/data:/data']

    def test_absolute_windows_path_does_not_expand(self):
        d = make_service_dict('foo', {'build': '.', 'volumes': ['c:\\data:/data']}, working_dir='.')
        assert d['volumes'] == ['c:\\data:/data']

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
    def test_relative_path_does_expand_posix(self):
        # './', '.' and '../' all resolve against the working dir.
        d = make_service_dict(
            'foo',
            {'build': '.', 'volumes': ['./data:/data']},
            working_dir='/home/me/myproject')
        assert d['volumes'] == ['/home/me/myproject/data:/data']

        d = make_service_dict(
            'foo',
            {'build': '.', 'volumes': ['.:/data']},
            working_dir='/home/me/myproject')
        assert d['volumes'] == ['/home/me/myproject:/data']

        d = make_service_dict(
            'foo',
            {'build': '.', 'volumes': ['../otherproject:/data']},
            working_dir='/home/me/myproject')
        assert d['volumes'] == ['/home/me/otherproject:/data']

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths')
    def test_relative_path_does_expand_windows(self):
        d = make_service_dict(
            'foo',
            {'build': '.', 'volumes': ['./data:/data']},
            working_dir='c:\\Users\\me\\myproject')
        assert d['volumes'] == ['c:\\Users\\me\\myproject\\data:/data']

        d = make_service_dict(
            'foo',
            {'build': '.', 'volumes': ['.:/data']},
            working_dir='c:\\Users\\me\\myproject')
        assert d['volumes'] == ['c:\\Users\\me\\myproject:/data']

        d = make_service_dict(
            'foo',
            {'build': '.', 'volumes': ['../otherproject:/data']},
            working_dir='c:\\Users\\me\\myproject')
        assert d['volumes'] == ['c:\\Users\\me\\otherproject:/data']

    @mock.patch.dict(os.environ)
    def test_home_directory_with_driver_does_not_expand(self):
        # With a volume_driver set, '~' is passed through untouched.
        os.environ['NAME'] = 'surprise!'
        d = make_service_dict('foo', {
            'build': '.',
            'volumes': ['~:/data'],
            'volume_driver': 'foodriver',
        }, working_dir='.')
        assert d['volumes'] == ['~:/data']

    def test_volume_path_with_non_ascii_directory(self):
        volume = u'/Füü/data:/data'
        container_path = config.resolve_volume_path(".", volume)
        assert container_path == volume
+
+
class MergePathMappingTest(object):
    """Mixin exercising merge behaviour for path-mapping options.

    Concrete subclasses set ``config_name`` (e.g. 'volumes', 'devices').
    Mappings sharing a container path are overridden; others accumulate.
    """
    config_name = ""

    def _merge(self, base, override):
        # Merge two service dicts carrying only the option under test.
        return config.merge_service_dicts(base, override, DEFAULT_VERSION)

    def test_empty(self):
        assert self.config_name not in self._merge({}, {})

    def test_no_override(self):
        result = self._merge({self.config_name: ['/foo:/code', '/data']}, {})
        assert set(result[self.config_name]) == {'/foo:/code', '/data'}

    def test_no_base(self):
        result = self._merge({}, {self.config_name: ['/bar:/code']})
        assert set(result[self.config_name]) == {'/bar:/code'}

    def test_override_explicit_path(self):
        result = self._merge(
            {self.config_name: ['/foo:/code', '/data']},
            {self.config_name: ['/bar:/code']})
        assert set(result[self.config_name]) == {'/bar:/code', '/data'}

    def test_add_explicit_path(self):
        result = self._merge(
            {self.config_name: ['/foo:/code', '/data']},
            {self.config_name: ['/bar:/code', '/quux:/data']})
        assert set(result[self.config_name]) == {'/bar:/code', '/quux:/data'}

    def test_remove_explicit_path(self):
        result = self._merge(
            {self.config_name: ['/foo:/code', '/quux:/data']},
            {self.config_name: ['/bar:/code', '/data']})
        assert set(result[self.config_name]) == {'/bar:/code', '/data'}
+
+
class MergeVolumesTest(unittest.TestCase, MergePathMappingTest):
    # Runs the MergePathMappingTest suite against the 'volumes' option.
    config_name = 'volumes'
+
+
class MergeDevicesTest(unittest.TestCase, MergePathMappingTest):
    # Runs the MergePathMappingTest suite against the 'devices' option.
    config_name = 'devices'
+
+
class BuildOrImageMergeTest(unittest.TestCase):
    """In V1, 'build' and 'image' are mutually exclusive on merge: an
    override specifying one replaces the other entirely.

    Converted from ``self.assertEqual`` to plain ``assert`` for
    consistency with the rest of this module.
    """

    def test_merge_build_or_image_no_override(self):
        assert config.merge_service_dicts({'build': '.'}, {}, V1) == {'build': '.'}
        assert config.merge_service_dicts({'image': 'redis'}, {}, V1) == {'image': 'redis'}

    def test_merge_build_or_image_override_with_same(self):
        assert config.merge_service_dicts(
            {'build': '.'}, {'build': './web'}, V1) == {'build': './web'}
        assert config.merge_service_dicts(
            {'image': 'redis'}, {'image': 'postgres'}, V1) == {'image': 'postgres'}

    def test_merge_build_or_image_override_with_other(self):
        # Whichever key the override specifies wins; the other is dropped.
        assert config.merge_service_dicts(
            {'build': '.'}, {'image': 'redis'}, V1) == {'image': 'redis'}
        assert config.merge_service_dicts(
            {'image': 'redis'}, {'build': '.'}, V1) == {'build': '.'}
+
+
class MergeListsTest(object):
    # Mixin for options whose list values merge by set-union.
    # Subclasses provide config_name plus base/override fixtures.
    config_name = ""
    base_config = []
    override_config = []

    def merged_config(self):
        # Expected merge result: union of both sides.
        return set(self.base_config) | set(self.override_config)

    def test_empty(self):
        assert self.config_name not in config.merge_service_dicts({}, {}, DEFAULT_VERSION)

    def test_no_override(self):
        service_dict = config.merge_service_dicts(
            {self.config_name: self.base_config},
            {},
            DEFAULT_VERSION)
        assert set(service_dict[self.config_name]) == set(self.base_config)

    def test_no_base(self):
        # NOTE(review): passes base_config as the override payload (not
        # override_config); either exercises the empty-base path.
        service_dict = config.merge_service_dicts(
            {},
            {self.config_name: self.base_config},
            DEFAULT_VERSION)
        assert set(service_dict[self.config_name]) == set(self.base_config)

    def test_add_item(self):
        service_dict = config.merge_service_dicts(
            {self.config_name: self.base_config},
            {self.config_name: self.override_config},
            DEFAULT_VERSION)
        assert set(service_dict[self.config_name]) == set(self.merged_config())
+
+
class MergePortsTest(unittest.TestCase, MergeListsTest):
    # Port entries are normalised during merging (presumably into
    # ServicePort objects — confirm), so expectations are converted
    # through a merge against an empty list first.
    config_name = 'ports'
    base_config = ['10:8000', '9000']
    override_config = ['20:8000']

    def merged_config(self):
        return self.convert(self.base_config) | self.convert(self.override_config)

    def convert(self, port_config):
        # Normalise a raw port list via a no-op merge.
        return set(config.merge_service_dicts(
            {self.config_name: port_config},
            {self.config_name: []},
            DEFAULT_VERSION
        )[self.config_name])

    def test_duplicate_port_mappings(self):
        # Merging identical port lists must not duplicate entries.
        service_dict = config.merge_service_dicts(
            {self.config_name: self.base_config},
            {self.config_name: self.base_config},
            DEFAULT_VERSION
        )
        assert set(service_dict[self.config_name]) == self.convert(self.base_config)

    # The next two override MergeListsTest to compare against the
    # normalised form rather than the raw strings.
    def test_no_override(self):
        service_dict = config.merge_service_dicts(
            {self.config_name: self.base_config},
            {},
            DEFAULT_VERSION)
        assert set(service_dict[self.config_name]) == self.convert(self.base_config)

    def test_no_base(self):
        service_dict = config.merge_service_dicts(
            {},
            {self.config_name: self.base_config},
            DEFAULT_VERSION)
        assert set(service_dict[self.config_name]) == self.convert(self.base_config)
+
+
class MergeNetworksTest(unittest.TestCase, MergeListsTest):
    # Runs the MergeListsTest suite against the 'networks' option.
    config_name = 'networks'
    base_config = ['frontend', 'backend']
    override_config = ['monitoring']
+
+
class MergeStringsOrListsTest(unittest.TestCase):
    """Options such as 'dns' accept a scalar or a list; merging
    normalises to a list and unions both sides."""

    def _dns_merge(self, base, override):
        # Merge and return the resulting 'dns' values as a set.
        merged = config.merge_service_dicts(base, override, DEFAULT_VERSION)
        return set(merged['dns'])

    def test_no_override(self):
        assert self._dns_merge({'dns': '8.8.8.8'}, {}) == {'8.8.8.8'}

    def test_no_base(self):
        assert self._dns_merge({}, {'dns': '8.8.8.8'}) == {'8.8.8.8'}

    def test_add_string(self):
        assert self._dns_merge(
            {'dns': ['8.8.8.8']}, {'dns': '9.9.9.9'}) == {'8.8.8.8', '9.9.9.9'}

    def test_add_list(self):
        assert self._dns_merge(
            {'dns': '8.8.8.8'}, {'dns': ['9.9.9.9']}) == {'8.8.8.8', '9.9.9.9'}
+
+
class MergeLabelsTest(unittest.TestCase):
    """Labels merge like a dict: override values win; bare keys map to ''."""

    def _merge_labels(self, base_labels, override_labels):
        # Build full service dicts so the label lists go through normal
        # parsing (list entries become a mapping) before being merged.
        base = {'build': '.'}
        if base_labels is not None:
            base['labels'] = base_labels
        override = {'build': '.'}
        if override_labels is not None:
            override['labels'] = override_labels
        merged = config.merge_service_dicts(
            make_service_dict('foo', base, 'tests/'),
            make_service_dict('foo', override, 'tests/'),
            DEFAULT_VERSION)
        return merged['labels']

    def test_empty(self):
        assert 'labels' not in config.merge_service_dicts({}, {}, DEFAULT_VERSION)

    def test_no_override(self):
        assert self._merge_labels(['foo=1', 'bar'], None) == {'foo': '1', 'bar': ''}

    def test_no_base(self):
        assert self._merge_labels(None, ['foo=2']) == {'foo': '2'}

    def test_override_explicit_value(self):
        assert self._merge_labels(['foo=1', 'bar'], ['foo=2']) == {'foo': '2', 'bar': ''}

    def test_add_explicit_value(self):
        assert self._merge_labels(['foo=1', 'bar'], ['bar=2']) == {'foo': '1', 'bar': '2'}

    def test_remove_explicit_value(self):
        assert self._merge_labels(['foo=1', 'bar=2'], ['bar']) == {'foo': '1', 'bar': ''}
+
+
class MergeBuildTest(unittest.TestCase):
    """Merging of the 'build' section: context, dockerfile, args,
    cache_from and labels."""

    @staticmethod
    def _merge(base, override):
        return config.merge_build(None, {'build': base}, {'build': override})

    def test_full(self):
        base = {
            'context': '.',
            'dockerfile': 'Dockerfile',
            'args': {'x': '1', 'y': '2'},
            'cache_from': ['ubuntu'],
            'labels': ['com.docker.compose.test=true'],
        }
        override = {
            'context': './prod',
            'dockerfile': 'Dockerfile.prod',
            'args': ['x=12'],
            'cache_from': ['debian'],
            'labels': {
                'com.docker.compose.test': 'false',
                'com.docker.compose.prod': 'true',
            },
        }

        result = self._merge(base, override)
        # Scalars take the override; args merge per key; cache_from unions.
        assert result['context'] == './prod'
        assert result['dockerfile'] == 'Dockerfile.prod'
        assert result['args'] == {'x': '12', 'y': '2'}
        assert set(result['cache_from']) == {'ubuntu', 'debian'}
        assert result['labels'] == override['labels']

    def test_empty_override(self):
        base = {
            'context': '.',
            'dockerfile': 'Dockerfile',
            'args': {'x': '1', 'y': '2'},
            'cache_from': ['ubuntu'],
            'labels': {'com.docker.compose.test': 'true'},
        }
        assert self._merge(base, {}) == base

    def test_empty_base(self):
        override = {
            'context': './prod',
            'dockerfile': 'Dockerfile.prod',
            'args': {'x': '12'},
            'cache_from': ['debian'],
            'labels': {
                'com.docker.compose.test': 'false',
                'com.docker.compose.prod': 'true',
            },
        }
        assert self._merge({}, override) == override
+
+
class MemoryOptionsTest(unittest.TestCase):
    """Validation of mem_limit / memswap_limit combinations.

    Converted from ``self.assertEqual`` to plain ``assert`` for
    consistency with the rest of this module.
    """

    def test_validation_fails_with_just_memswap_limit(self):
        """
        When you set a 'memswap_limit' it is invalid config unless you also set
        a mem_limit
        """
        with pytest.raises(ConfigurationError) as excinfo:
            config.load(
                build_config_details(
                    {
                        'foo': {'image': 'busybox', 'memswap_limit': 2000000},
                    },
                    'tests/fixtures/extends',
                    'filename.yml'
                )
            )

        assert "foo.memswap_limit is invalid: when defining " \
            "'memswap_limit' you must set 'mem_limit' as well" \
            in excinfo.exconly()

    def test_validation_with_correct_memswap_values(self):
        # Integer values for both limits pass validation unchanged.
        service_dict = config.load(
            build_config_details(
                {'foo': {'image': 'busybox', 'mem_limit': 1000000, 'memswap_limit': 2000000}},
                'tests/fixtures/extends',
                'common.yml'
            )
        ).services
        assert service_dict[0]['memswap_limit'] == 2000000

    def test_memswap_can_be_a_string(self):
        # Unit-suffixed strings ('1G', '512M') are accepted and preserved.
        service_dict = config.load(
            build_config_details(
                {'foo': {'image': 'busybox', 'mem_limit': "1G", 'memswap_limit': "512M"}},
                'tests/fixtures/extends',
                'common.yml'
            )
        ).services
        assert service_dict[0]['memswap_limit'] == "512M"
+
+
class EnvTest(unittest.TestCase):
    """Environment parsing and resolution: lists, dicts, env_files and
    lookups from the process environment.

    Converted from unittest helpers to plain ``assert`` /
    ``pytest.raises`` for consistency with the rest of this module.
    """

    def test_parse_environment_as_list(self):
        environment = [
            'NORMAL=F1',
            'CONTAINS_EQUALS=F=2',
            'TRAILING_EQUALS=',
        ]
        # Only the first '=' splits key from value.
        assert config.parse_environment(environment) == {
            'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''
        }

    def test_parse_environment_as_dict(self):
        environment = {
            'NORMAL': 'F1',
            'CONTAINS_EQUALS': 'F=2',
            'TRAILING_EQUALS': None,
        }
        # A mapping is passed through as-is (None values resolved later).
        assert config.parse_environment(environment) == environment

    def test_parse_environment_invalid(self):
        # A bare string is neither a list nor a mapping.
        with pytest.raises(ConfigurationError):
            config.parse_environment('a=b')

    def test_parse_environment_empty(self):
        assert config.parse_environment(None) == {}

    @mock.patch.dict(os.environ)
    def test_resolve_environment(self):
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'

        service_dict = {
            'build': '.',
            'environment': {
                'FILE_DEF': 'F1',
                'FILE_DEF_EMPTY': '',
                'ENV_DEF': None,
                'NO_DEF': None
            },
        }
        # Explicit values (even '') win over the environment; None pulls
        # from the environment and stays None when unset there.
        assert resolve_environment(
            service_dict, Environment.from_env_file(None)
        ) == {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None}

    def test_resolve_environment_from_env_file(self):
        assert resolve_environment(
            {'env_file': ['tests/fixtures/env/one.env']}
        ) == {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'}

    def test_environment_overrides_env_file(self):
        assert resolve_environment({
            'environment': {'FOO': 'baz'},
            'env_file': ['tests/fixtures/env/one.env'],
        }) == {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz'}

    def test_resolve_environment_with_multiple_env_files(self):
        # Later files override earlier ones (two.env wins for FOO).
        service_dict = {
            'env_file': [
                'tests/fixtures/env/one.env',
                'tests/fixtures/env/two.env'
            ]
        }
        assert resolve_environment(service_dict) == {
            'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'
        }

    def test_resolve_environment_nonexistent_file(self):
        with pytest.raises(ConfigurationError) as exc:
            config.load(build_config_details(
                {'foo': {'image': 'example', 'env_file': 'nonexistent.env'}},
                working_dir='tests/fixtures/env'))

        assert 'Couldn\'t find env file' in exc.exconly()
        assert 'nonexistent.env' in exc.exconly()

    @mock.patch.dict(os.environ)
    def test_resolve_environment_from_env_file_with_empty_values(self):
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'
        assert resolve_environment(
            {'env_file': ['tests/fixtures/env/resolve.env']},
            Environment.from_env_file(None)
        ) == {
            'FILE_DEF': u'bär',
            'FILE_DEF_EMPTY': '',
            'ENV_DEF': 'E3',
            'NO_DEF': None
        }

    @mock.patch.dict(os.environ)
    def test_resolve_build_args(self):
        # Build args resolve against the environment like service env vars.
        os.environ['env_arg'] = 'value2'

        build = {
            'context': '.',
            'args': {
                'arg1': 'value1',
                'empty_arg': '',
                'env_arg': None,
                'no_env': None
            }
        }
        assert resolve_build_args(
            build['args'], Environment.from_env_file(build['context'])
        ) == {'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None}

    @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
    @mock.patch.dict(os.environ)
    def test_resolve_path(self):
        os.environ['HOSTENV'] = '/tmp'
        os.environ['CONTAINERENV'] = '/host/tmp'

        service_dict = config.load(
            build_config_details(
                {'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}},
                "tests/fixtures/env",
            )
        ).services[0]
        assert set(service_dict['volumes']) == set([VolumeSpec.parse('/tmp:/host/tmp')])

        # Interpolation also works when embedded inside a longer path.
        service_dict = config.load(
            build_config_details(
                {'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}},
                "tests/fixtures/env",
            )
        ).services[0]
        assert set(service_dict['volumes']) == set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')])
+
+
def load_from_filename(filename, override_dir=None):
    """Load services from a single compose file, relative to the cwd."""
    env = Environment.from_env_file('.')
    details = config.find('.', [filename], env, override_dir=override_dir)
    return config.load(details).services
+
+
+class ExtendsTest(unittest.TestCase):
+
    def test_extends(self):
        # Expected values mirror tests/fixtures/extends/docker-compose.yml:
        # the extending service ('myweb') ends up with its own links and
        # network_mode plus a merged environment.
        service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml')

        self.assertEqual(service_sort(service_dicts), service_sort([
            {
                'name': 'mydb',
                'image': 'busybox',
                'command': 'top',
            },
            {
                'name': 'myweb',
                'image': 'busybox',
                'command': 'top',
                'network_mode': 'bridge',
                'links': ['mydb:db'],
                'environment': {
                    "FOO": "1",
                    "BAR": "2",
                    "BAZ": "2",
                },
            }
        ]))
+
    def test_merging_env_labels_ulimits(self):
        # environment, labels and ulimits must all survive an extends
        # merge (fixture: common-env-labels-ulimits.yml).
        service_dicts = load_from_filename('tests/fixtures/extends/common-env-labels-ulimits.yml')

        self.assertEqual(service_sort(service_dicts), service_sort([
            {
                'name': 'web',
                'image': 'busybox',
                'command': '/bin/true',
                'network_mode': 'host',
                'environment': {
                    "FOO": "2",
                    "BAR": "1",
                    "BAZ": "3",
                },
                'labels': {'label': 'one'},
                'ulimits': {'nproc': 65535, 'memlock': {'soft': 1024, 'hard': 2048}}
            }
        ]))
+
    def test_nested(self):
        # Chained extends (nested.yml) resolve transitively into a single
        # fully-merged service.
        service_dicts = load_from_filename('tests/fixtures/extends/nested.yml')

        self.assertEqual(service_dicts, [
            {
                'name': 'myweb',
                'image': 'busybox',
                'command': '/bin/true',
                'network_mode': 'host',
                'environment': {
                    "FOO": "2",
                    "BAR": "2",
                },
            },
        ])
+
    def test_self_referencing_file(self):
        """
        We specify a 'file' key that is the filename we're already in.
        """
        service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml')
        self.assertEqual(service_sort(service_dicts), service_sort([
            {
                'environment':
                {
                    'YEP': '1', 'BAR': '1', 'BAZ': '3'
                },
                'image': 'busybox',
                'name': 'myweb'
            },
            {
                'environment':
                {'YEP': '1'},
                'image': 'busybox',
                'name': 'otherweb'
            },
            {
                'environment':
                {'YEP': '1', 'BAZ': '3'},
                'image': 'busybox',
                'name': 'web'
            }
        ]))
+
+ def test_circular(self):
+ with pytest.raises(config.CircularReference) as exc:
+ load_from_filename('tests/fixtures/extends/circle-1.yml')
+
+ path = [
+ (os.path.basename(filename), service_name)
+ for (filename, service_name) in exc.value.trail
+ ]
+ expected = [
+ ('circle-1.yml', 'web'),
+ ('circle-2.yml', 'other'),
+ ('circle-1.yml', 'web'),
+ ]
+ self.assertEqual(path, expected)
+
+ def test_extends_validation_empty_dictionary(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'image': 'busybox', 'extends': {}},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert 'service' in excinfo.exconly()
+
+ def test_extends_validation_missing_service_key(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'image': 'busybox', 'extends': {'file': 'common.yml'}},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "'service' is a required property" in excinfo.exconly()
+
+ def test_extends_validation_invalid_key(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'extends': {
+ 'file': 'common.yml',
+ 'service': 'web',
+ 'rogue_key': 'is not allowed'
+ }
+ },
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extends contains unsupported option: 'rogue_key'" \
+ in excinfo.exconly()
+
+ def test_extends_validation_sub_property_key(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'extends': {
+ 'file': 1,
+ 'service': 'web',
+ }
+ },
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extends.file contains 1, which is an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_extends_validation_no_file_key_no_filename_set(self):
+ dictionary = {'extends': {'service': 'web'}}
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends')
+
+ assert 'file' in excinfo.exconly()
+
+ def test_extends_validation_valid_config(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'web': {'image': 'busybox', 'extends': {'service': 'web', 'file': 'common.yml'}},
+ },
+ 'tests/fixtures/extends',
+ 'common.yml'
+ )
+ ).services
+
+ self.assertEqual(len(service), 1)
+ self.assertIsInstance(service[0], dict)
+ self.assertEqual(service[0]['command'], "/bin/true")
+
+ def test_extended_service_with_invalid_config(self):
+ with pytest.raises(ConfigurationError) as exc:
+ load_from_filename('tests/fixtures/extends/service-with-invalid-schema.yml')
+ assert (
+ "myweb has neither an image nor a build context specified" in
+ exc.exconly()
+ )
+
+ def test_extended_service_with_valid_config(self):
+ service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml')
+ self.assertEqual(service[0]['command'], "top")
+
+ def test_extends_file_defaults_to_self(self):
+ """
+ Test not specifying a file in our extends options that the
+ config is valid and correctly extends from itself.
+ """
+ service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml')
+ self.assertEqual(service_sort(service_dicts), service_sort([
+ {
+ 'name': 'myweb',
+ 'image': 'busybox',
+ 'environment': {
+ "BAR": "1",
+ "BAZ": "3",
+ }
+ },
+ {
+ 'name': 'web',
+ 'image': 'busybox',
+ 'environment': {
+ "BAZ": "3",
+ }
+ }
+ ]))
+
+ def test_invalid_links_in_extended_service(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-links.yml')
+
+ assert "services with 'links' cannot be extended" in excinfo.exconly()
+
+ def test_invalid_volumes_from_in_extended_service(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-volumes.yml')
+
+ assert "services with 'volumes_from' cannot be extended" in excinfo.exconly()
+
+ def test_invalid_net_in_extended_service(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-net-v2.yml')
+
+ assert 'network_mode: service' in excinfo.exconly()
+ assert 'cannot be extended' in excinfo.exconly()
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-net.yml')
+
+ assert 'net: container' in excinfo.exconly()
+ assert 'cannot be extended' in excinfo.exconly()
+
+ @mock.patch.dict(os.environ)
+ def test_load_config_runs_interpolation_in_extended_service(self):
+ os.environ.update(HOSTNAME_VALUE="penguin")
+ expected_interpolated_value = "host-penguin"
+ service_dicts = load_from_filename(
+ 'tests/fixtures/extends/valid-interpolation.yml')
+ for service in service_dicts:
+ assert service['hostname'] == expected_interpolated_value
+
+ @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+ def test_volume_path(self):
+ dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml')
+
+ paths = [
+ VolumeSpec(
+ os.path.abspath('tests/fixtures/volume-path/common/foo'),
+ '/foo',
+ 'rw'),
+ VolumeSpec(
+ os.path.abspath('tests/fixtures/volume-path/bar'),
+ '/bar',
+ 'rw')
+ ]
+
+ self.assertEqual(set(dicts[0]['volumes']), set(paths))
+
+ def test_parent_build_path_dne(self):
+ child = load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml')
+
+ self.assertEqual(child, [
+ {
+ 'name': 'dnechild',
+ 'image': 'busybox',
+ 'command': '/bin/true',
+ 'environment': {
+ "FOO": "1",
+ "BAR": "2",
+ },
+ },
+ ])
+
+ def test_load_throws_error_when_base_service_does_not_exist(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/nonexistent-service.yml')
+
+ assert "Cannot extend service 'foo'" in excinfo.exconly()
+ assert "Service not found" in excinfo.exconly()
+
+ def test_partial_service_config_in_extends_is_still_valid(self):
+ dicts = load_from_filename('tests/fixtures/extends/valid-common-config.yml')
+ self.assertEqual(dicts[0]['environment'], {'FOO': '1'})
+
+ def test_extended_service_with_verbose_and_shorthand_way(self):
+ services = load_from_filename('tests/fixtures/extends/verbose-and-shorthand.yml')
+ self.assertEqual(service_sort(services), service_sort([
+ {
+ 'name': 'base',
+ 'image': 'busybox',
+ 'environment': {'BAR': '1'},
+ },
+ {
+ 'name': 'verbose',
+ 'image': 'busybox',
+ 'environment': {'BAR': '1', 'FOO': '1'},
+ },
+ {
+ 'name': 'shorthand',
+ 'image': 'busybox',
+ 'environment': {'BAR': '1', 'FOO': '2'},
+ },
+ ]))
+
+ @mock.patch.dict(os.environ)
+ def test_extends_with_environment_and_env_files(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_environment')
+ self.addCleanup(tmpdir.remove)
+ commondir = tmpdir.mkdir('common')
+ commondir.join('base.yml').write("""
+ app:
+ image: 'example/app'
+ env_file:
+ - 'envs'
+ environment:
+ - SECRET
+ - TEST_ONE=common
+ - TEST_TWO=common
+ """)
+ tmpdir.join('docker-compose.yml').write("""
+ ext:
+ extends:
+ file: common/base.yml
+ service: app
+ env_file:
+ - 'envs'
+ environment:
+ - THING
+ - TEST_ONE=top
+ """)
+ commondir.join('envs').write("""
+ COMMON_ENV_FILE
+ TEST_ONE=common-env-file
+ TEST_TWO=common-env-file
+ TEST_THREE=common-env-file
+ TEST_FOUR=common-env-file
+ """)
+ tmpdir.join('envs').write("""
+ TOP_ENV_FILE
+ TEST_ONE=top-env-file
+ TEST_TWO=top-env-file
+ TEST_THREE=top-env-file
+ """)
+
+ expected = [
+ {
+ 'name': 'ext',
+ 'image': 'example/app',
+ 'environment': {
+ 'SECRET': 'secret',
+ 'TOP_ENV_FILE': 'secret',
+ 'COMMON_ENV_FILE': 'secret',
+ 'THING': 'thing',
+ 'TEST_ONE': 'top',
+ 'TEST_TWO': 'common',
+ 'TEST_THREE': 'top-env-file',
+ 'TEST_FOUR': 'common-env-file',
+ },
+ },
+ ]
+
+ os.environ['SECRET'] = 'secret'
+ os.environ['THING'] = 'thing'
+ os.environ['COMMON_ENV_FILE'] = 'secret'
+ os.environ['TOP_ENV_FILE'] = 'secret'
+ config = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+
+ assert config == expected
+
+ def test_extends_with_mixed_versions_is_error(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_mixed_version')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: "2"
+ services:
+ web:
+ extends:
+ file: base.yml
+ service: base
+ image: busybox
+ """)
+ tmpdir.join('base.yml').write("""
+ base:
+ volumes: ['/foo']
+ ports: ['3000:3000']
+ """)
+
+ with pytest.raises(ConfigurationError) as exc:
+ load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ assert 'Version mismatch' in exc.exconly()
+
+ def test_extends_with_defined_version_passes(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_defined_version')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: "2"
+ services:
+ web:
+ extends:
+ file: base.yml
+ service: base
+ image: busybox
+ """)
+ tmpdir.join('base.yml').write("""
+ version: "2"
+ services:
+ base:
+ volumes: ['/foo']
+ ports: ['3000:3000']
+ command: top
+ """)
+
+ service = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ self.assertEqual(service[0]['command'], "top")
+
+ def test_extends_with_depends_on(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_depends_on')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: "2"
+ services:
+ base:
+ image: example
+ web:
+ extends: base
+ image: busybox
+ depends_on: ['other']
+ other:
+ image: example
+ """)
+ services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ assert service_sort(services)[2]['depends_on'] == {
+ 'other': {'condition': 'service_started'}
+ }
+
+ def test_extends_with_healthcheck(self):
+ service_dicts = load_from_filename('tests/fixtures/extends/healthcheck-2.yml')
+ assert service_sort(service_dicts) == [{
+ 'name': 'demo',
+ 'image': 'foobar:latest',
+ 'healthcheck': {
+ 'test': ['CMD', '/health.sh'],
+ 'interval': 10000000000,
+ 'timeout': 5000000000,
+ 'retries': 36,
+ }
+ }]
+
+ def test_extends_with_ports(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_ports')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: '2'
+
+ services:
+ a:
+ image: nginx
+ ports:
+ - 80
+
+ b:
+ extends:
+ service: a
+ """)
+ services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+
+ assert len(services) == 2
+ for svc in services:
+ assert svc['ports'] == [types.ServicePort('80', None, None, None, None)]
+
+
+@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+class ExpandPathTest(unittest.TestCase):
+ working_dir = '/home/user/somedir'
+
+ def test_expand_path_normal(self):
+ result = config.expand_path(self.working_dir, 'myfile')
+ self.assertEqual(result, self.working_dir + '/' + 'myfile')
+
+ def test_expand_path_absolute(self):
+ abs_path = '/home/user/otherdir/somefile'
+ result = config.expand_path(self.working_dir, abs_path)
+ self.assertEqual(result, abs_path)
+
+ def test_expand_path_with_tilde(self):
+ test_path = '~/otherdir/somefile'
+ with mock.patch.dict(os.environ):
+ os.environ['HOME'] = user_path = '/home/user/'
+ result = config.expand_path(self.working_dir, test_path)
+
+ self.assertEqual(result, user_path + 'otherdir/somefile')
+
+
+class VolumePathTest(unittest.TestCase):
+
+ def test_split_path_mapping_with_windows_path(self):
+ host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config"
+ windows_volume_path = host_path + ":/opt/connect/config:ro"
+ expected_mapping = ("/opt/connect/config", (host_path, 'ro'))
+
+ mapping = config.split_path_mapping(windows_volume_path)
+ assert mapping == expected_mapping
+
+ def test_split_path_mapping_with_windows_path_in_container(self):
+ host_path = 'c:\\Users\\remilia\\data'
+ container_path = 'c:\\scarletdevil\\data'
+ expected_mapping = (container_path, (host_path, None))
+
+ mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
+ assert mapping == expected_mapping
+
+ def test_split_path_mapping_with_root_mount(self):
+ host_path = '/'
+ container_path = '/var/hostroot'
+ expected_mapping = (container_path, (host_path, None))
+ mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
+ assert mapping == expected_mapping
+
+
+@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+class BuildPathTest(unittest.TestCase):
+
+ def setUp(self):
+ self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx')
+
+ def test_nonexistent_path(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'build': 'nonexistent.path'},
+ },
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ def test_relative_path(self):
+ relative_build_path = '../build-ctx/'
+ service_dict = make_service_dict(
+ 'relpath',
+ {'build': relative_build_path},
+ working_dir='tests/fixtures/build-path'
+ )
+ self.assertEqual(service_dict['build'], self.abs_context_path)
+
+ def test_absolute_path(self):
+ service_dict = make_service_dict(
+ 'abspath',
+ {'build': self.abs_context_path},
+ working_dir='tests/fixtures/build-path'
+ )
+ self.assertEqual(service_dict['build'], self.abs_context_path)
+
+ def test_from_file(self):
+ service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml')
+ self.assertEqual(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+
+ def test_from_file_override_dir(self):
+ override_dir = os.path.join(os.getcwd(), 'tests/fixtures/')
+ service_dict = load_from_filename(
+ 'tests/fixtures/build-path-override-dir/docker-compose.yml', override_dir=override_dir)
+ self.assertEquals(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+
+ def test_valid_url_in_build_path(self):
+ valid_urls = [
+ 'git://github.com/docker/docker',
+ 'git@github.com:docker/docker.git',
+ 'git@bitbucket.org:atlassianlabs/atlassian-docker.git',
+ 'https://github.com/docker/docker.git',
+ 'http://github.com/docker/docker.git',
+ 'github.com/docker/docker.git',
+ ]
+ for valid_url in valid_urls:
+ service_dict = config.load(build_config_details({
+ 'validurl': {'build': valid_url},
+ }, '.', None)).services
+ assert service_dict[0]['build'] == {'context': valid_url}
+
+ def test_invalid_url_in_build_path(self):
+ invalid_urls = [
+ 'example.com/bogus',
+ 'ftp://example.com/',
+ '/path/does/not/exist',
+ ]
+ for invalid_url in invalid_urls:
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'invalidurl': {'build': invalid_url},
+ }, '.', None))
+ assert 'build path' in exc.exconly()
+
+
+class HealthcheckTest(unittest.TestCase):
+ def test_healthcheck(self):
+ service_dict = make_service_dict(
+ 'test',
+ {'healthcheck': {
+ 'test': ['CMD', 'true'],
+ 'interval': '1s',
+ 'timeout': '1m',
+ 'retries': 3,
+ 'start_period': '10s'
+ }},
+ '.',
+ )
+
+ assert service_dict['healthcheck'] == {
+ 'test': ['CMD', 'true'],
+ 'interval': nanoseconds_from_time_seconds(1),
+ 'timeout': nanoseconds_from_time_seconds(60),
+ 'retries': 3,
+ 'start_period': nanoseconds_from_time_seconds(10)
+ }
+
+ def test_disable(self):
+ service_dict = make_service_dict(
+ 'test',
+ {'healthcheck': {
+ 'disable': True,
+ }},
+ '.',
+ )
+
+ assert service_dict['healthcheck'] == {
+ 'test': ['NONE'],
+ }
+
+ def test_disable_with_other_config_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ make_service_dict(
+ 'invalid-healthcheck',
+ {'healthcheck': {
+ 'disable': True,
+ 'interval': '1s',
+ }},
+ '.',
+ )
+
+ assert 'invalid-healthcheck' in excinfo.exconly()
+ assert 'disable' in excinfo.exconly()
+
+
+class GetDefaultConfigFilesTestCase(unittest.TestCase):
+
+ files = [
+ 'docker-compose.yml',
+ 'docker-compose.yaml',
+ ]
+
+ def test_get_config_path_default_file_in_basedir(self):
+ for index, filename in enumerate(self.files):
+ self.assertEqual(
+ filename,
+ get_config_filename_for_files(self.files[index:]))
+ with self.assertRaises(config.ComposeFileNotFound):
+ get_config_filename_for_files([])
+
+ def test_get_config_path_default_file_in_parent_dir(self):
+ """Test with files placed in the subdir"""
+
+ def get_config_in_subdir(files):
+ return get_config_filename_for_files(files, subdir=True)
+
+ for index, filename in enumerate(self.files):
+ self.assertEqual(filename, get_config_in_subdir(self.files[index:]))
+ with self.assertRaises(config.ComposeFileNotFound):
+ get_config_in_subdir([])
+
+
+def get_config_filename_for_files(filenames, subdir=None):
+ def make_files(dirname, filenames):
+ for fname in filenames:
+ with open(os.path.join(dirname, fname), 'w') as f:
+ f.write('')
+
+ project_dir = tempfile.mkdtemp()
+ try:
+ make_files(project_dir, filenames)
+ if subdir:
+ base_dir = tempfile.mkdtemp(dir=project_dir)
+ else:
+ base_dir = project_dir
+ filename, = config.get_default_config_files(base_dir)
+ return os.path.basename(filename)
+ finally:
+ shutil.rmtree(project_dir)
+
+
+class SerializeTest(unittest.TestCase):
+ def test_denormalize_depends_on_v3(self):
+ service_dict = {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': {
+ 'service2': {'condition': 'service_started'},
+ 'service3': {'condition': 'service_started'},
+ }
+ }
+
+ assert denormalize_service_dict(service_dict, V3_0) == {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': ['service2', 'service3']
+ }
+
+ def test_denormalize_depends_on_v2_1(self):
+ service_dict = {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': {
+ 'service2': {'condition': 'service_started'},
+ 'service3': {'condition': 'service_started'},
+ }
+ }
+
+ assert denormalize_service_dict(service_dict, V2_1) == service_dict
+
+ def test_serialize_time(self):
+ data = {
+ 9: '9ns',
+ 9000: '9us',
+ 9000000: '9ms',
+ 90000000: '90ms',
+ 900000000: '900ms',
+ 999999999: '999999999ns',
+ 1000000000: '1s',
+ 60000000000: '1m',
+ 60000000001: '60000000001ns',
+ 9000000000000: '150m',
+ 90000000000000: '25h',
+ }
+
+ for k, v in data.items():
+ assert serialize_ns_time_value(k) == v
+
+ def test_denormalize_healthcheck(self):
+ service_dict = {
+ 'image': 'test',
+ 'healthcheck': {
+ 'test': 'exit 1',
+ 'interval': '1m40s',
+ 'timeout': '30s',
+ 'retries': 5,
+ 'start_period': '2s90ms'
+ }
+ }
+ processed_service = config.process_service(config.ServiceConfig(
+ '.', 'test', 'test', service_dict
+ ))
+ denormalized_service = denormalize_service_dict(processed_service, V2_3)
+ assert denormalized_service['healthcheck']['interval'] == '100s'
+ assert denormalized_service['healthcheck']['timeout'] == '30s'
+ assert denormalized_service['healthcheck']['start_period'] == '2090ms'
+
+ def test_denormalize_image_has_digest(self):
+ service_dict = {
+ 'image': 'busybox'
+ }
+ image_digest = 'busybox@sha256:abcde'
+
+ assert denormalize_service_dict(service_dict, V3_0, image_digest) == {
+ 'image': 'busybox@sha256:abcde'
+ }
+
+ def test_denormalize_image_no_digest(self):
+ service_dict = {
+ 'image': 'busybox'
+ }
+
+ assert denormalize_service_dict(service_dict, V3_0) == {
+ 'image': 'busybox'
+ }
+
+ def test_serialize_secrets(self):
+ service_dict = {
+ 'image': 'example/web',
+ 'secrets': [
+ {'source': 'one'},
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ }
+ ]
+ }
+ secrets_dict = {
+ 'one': {'file': '/one.txt'},
+ 'source': {'file': '/source.pem'},
+ 'two': {'external': True},
+ }
+ config_dict = config.load(build_config_details({
+ 'version': '3.1',
+ 'services': {'web': service_dict},
+ 'secrets': secrets_dict
+ }))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets'])
+ assert 'secrets' in serialized_config
+ assert serialized_config['secrets']['two'] == secrets_dict['two']
+
+ def test_serialize_ports(self):
+ config_dict = config.Config(version=V2_0, services=[
+ {
+ 'ports': [types.ServicePort('80', '8080', None, None, None)],
+ 'image': 'alpine',
+ 'name': 'web'
+ }
+ ], volumes={}, networks={}, secrets={}, configs={})
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ assert '8080:80/tcp' in serialized_config['services']['web']['ports']
+
+ def test_serialize_configs(self):
+ service_dict = {
+ 'image': 'example/web',
+ 'configs': [
+ {'source': 'one'},
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ }
+ ]
+ }
+ configs_dict = {
+ 'one': {'file': '/one.txt'},
+ 'source': {'file': '/source.pem'},
+ 'two': {'external': True},
+ }
+ config_dict = config.load(build_config_details({
+ 'version': '3.3',
+ 'services': {'web': service_dict},
+ 'configs': configs_dict
+ }))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert secret_sort(serialized_service['configs']) == secret_sort(service_dict['configs'])
+ assert 'configs' in serialized_config
+ assert serialized_config['configs']['two'] == configs_dict['two']
+
+ def test_serialize_bool_string(self):
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'command': 'true',
+ 'environment': {'FOO': 'Y', 'BAR': 'on'}
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg))
+
+ serialized_config = serialize_config(config_dict)
+ assert 'command: "true"\n' in serialized_config
+ assert 'FOO: "Y"\n' in serialized_config
+ assert 'BAR: "on"\n' in serialized_config
+
+ def test_serialize_escape_dollar_sign(self):
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': 'echo $$FOO',
+ 'environment': {
+ 'CURRENCY': '$$'
+ },
+ 'entrypoint': ['$$SHELL', '-c'],
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert serialized_service['environment']['CURRENCY'] == '$$'
+ assert serialized_service['command'] == 'echo $$FOO'
+ assert serialized_service['entrypoint'][0] == '$$SHELL'
diff --git a/tests/unit/config/environment_test.py b/tests/unit/config/environment_test.py
new file mode 100644
index 00000000..20446d2b
--- /dev/null
+++ b/tests/unit/config/environment_test.py
@@ -0,0 +1,40 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from compose.config.environment import Environment
+from tests import unittest
+
+
+class EnvironmentTest(unittest.TestCase):
+ def test_get_simple(self):
+ env = Environment({
+ 'FOO': 'bar',
+ 'BAR': '1',
+ 'BAZ': ''
+ })
+
+ assert env.get('FOO') == 'bar'
+ assert env.get('BAR') == '1'
+ assert env.get('BAZ') == ''
+
+ def test_get_undefined(self):
+ env = Environment({
+ 'FOO': 'bar'
+ })
+ assert env.get('FOOBAR') is None
+
+ def test_get_boolean(self):
+ env = Environment({
+ 'FOO': '',
+ 'BAR': '0',
+ 'BAZ': 'FALSE',
+ 'FOOBAR': 'true',
+ })
+
+ assert env.get_boolean('FOO') is False
+ assert env.get_boolean('BAR') is False
+ assert env.get_boolean('BAZ') is False
+ assert env.get_boolean('FOOBAR') is True
+ assert env.get_boolean('UNDEFINED') is False
diff --git a/tests/unit/config/interpolation_test.py b/tests/unit/config/interpolation_test.py
new file mode 100644
index 00000000..018a5621
--- /dev/null
+++ b/tests/unit/config/interpolation_test.py
@@ -0,0 +1,148 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from compose.config.environment import Environment
+from compose.config.interpolation import interpolate_environment_variables
+from compose.config.interpolation import Interpolator
+from compose.config.interpolation import InvalidInterpolation
+from compose.config.interpolation import TemplateWithDefaults
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+
+
+@pytest.fixture
+def mock_env():
+ return Environment({'USER': 'jenny', 'FOO': 'bar'})
+
+
+@pytest.fixture
+def variable_mapping():
+ return Environment({'FOO': 'first', 'BAR': ''})
+
+
+@pytest.fixture
+def defaults_interpolator(variable_mapping):
+ return Interpolator(TemplateWithDefaults, variable_mapping).interpolate
+
+
+def test_interpolate_environment_variables_in_services(mock_env):
+ services = {
+ 'servicea': {
+ 'image': 'example:${USER}',
+ 'volumes': ['$FOO:/target'],
+ 'logging': {
+ 'driver': '${FOO}',
+ 'options': {
+ 'user': '$USER',
+ }
+ }
+ }
+ }
+ expected = {
+ 'servicea': {
+ 'image': 'example:jenny',
+ 'volumes': ['bar:/target'],
+ 'logging': {
+ 'driver': 'bar',
+ 'options': {
+ 'user': 'jenny',
+ }
+ }
+ }
+ }
+ value = interpolate_environment_variables(V2_0, services, 'service', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_variables_in_volumes(mock_env):
+ volumes = {
+ 'data': {
+ 'driver': '$FOO',
+ 'driver_opts': {
+ 'max': 2,
+ 'user': '${USER}'
+ }
+ },
+ 'other': None,
+ }
+ expected = {
+ 'data': {
+ 'driver': 'bar',
+ 'driver_opts': {
+ 'max': 2,
+ 'user': 'jenny'
+ }
+ },
+ 'other': {},
+ }
+ value = interpolate_environment_variables(V2_0, volumes, 'volume', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_variables_in_secrets(mock_env):
+ secrets = {
+ 'secretservice': {
+ 'file': '$FOO',
+ 'labels': {
+ 'max': 2,
+ 'user': '${USER}'
+ }
+ },
+ 'other': None,
+ }
+ expected = {
+ 'secretservice': {
+ 'file': 'bar',
+ 'labels': {
+ 'max': 2,
+ 'user': 'jenny'
+ }
+ },
+ 'other': {},
+ }
+ value = interpolate_environment_variables(V3_1, secrets, 'volume', mock_env)
+ assert value == expected
+
+
+def test_escaped_interpolation(defaults_interpolator):
+ assert defaults_interpolator('$${foo}') == '${foo}'
+
+
+def test_invalid_interpolation(defaults_interpolator):
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('$}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${ }')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${ foo}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${foo }')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${foo!}')
+
+
+def test_interpolate_missing_no_default(defaults_interpolator):
+ assert defaults_interpolator("This ${missing} var") == "This var"
+ assert defaults_interpolator("This ${BAR} var") == "This var"
+
+
+def test_interpolate_with_value(defaults_interpolator):
+ assert defaults_interpolator("This $FOO var") == "This first var"
+ assert defaults_interpolator("This ${FOO} var") == "This first var"
+
+
+def test_interpolate_missing_with_default(defaults_interpolator):
+ assert defaults_interpolator("ok ${missing:-def}") == "ok def"
+ assert defaults_interpolator("ok ${missing-def}") == "ok def"
+ assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
+
+
+def test_interpolate_with_empty_and_default_value(defaults_interpolator):
+ assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
+ assert defaults_interpolator("ok ${BAR-def}") == "ok "
diff --git a/tests/unit/config/sort_services_test.py b/tests/unit/config/sort_services_test.py
new file mode 100644
index 00000000..c39ac022
--- /dev/null
+++ b/tests/unit/config/sort_services_test.py
@@ -0,0 +1,243 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from compose.config.errors import DependencyError
+from compose.config.sort_services import sort_service_dicts
+from compose.config.types import VolumeFromSpec
+
+
+class TestSortService(object):
+ def test_sort_service_dicts_1(self):
+ services = [
+ {
+ 'links': ['redis'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'grunt'
+ },
+ {
+ 'name': 'redis'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'grunt'
+ assert sorted_services[1]['name'] == 'redis'
+ assert sorted_services[2]['name'] == 'web'
+
+ def test_sort_service_dicts_2(self):
+ services = [
+ {
+ 'links': ['redis', 'postgres'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'postgres',
+ 'links': ['redis']
+ },
+ {
+ 'name': 'redis'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'redis'
+ assert sorted_services[1]['name'] == 'postgres'
+ assert sorted_services[2]['name'] == 'web'
+
+ def test_sort_service_dicts_3(self):
+ services = [
+ {
+ 'name': 'child'
+ },
+ {
+ 'name': 'parent',
+ 'links': ['child']
+ },
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_4(self):
+ services = [
+ {
+ 'name': 'child'
+ },
+ {
+ 'name': 'parent',
+ 'volumes_from': [VolumeFromSpec('child', 'rw', 'service')]
+ },
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_5(self):
+ services = [
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ {
+ 'name': 'parent',
+ 'network_mode': 'service:child'
+ },
+ {
+ 'name': 'child'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_6(self):
+ services = [
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ {
+ 'name': 'parent',
+ 'volumes_from': [VolumeFromSpec('child', 'ro', 'service')]
+ },
+ {
+ 'name': 'child'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_7(self):
+ services = [
+ {
+ 'network_mode': 'service:three',
+ 'name': 'four'
+ },
+ {
+ 'links': ['two'],
+ 'name': 'three'
+ },
+ {
+ 'name': 'two',
+ 'volumes_from': [VolumeFromSpec('one', 'rw', 'service')]
+ },
+ {
+ 'name': 'one'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 4
+ assert sorted_services[0]['name'] == 'one'
+ assert sorted_services[1]['name'] == 'two'
+ assert sorted_services[2]['name'] == 'three'
+ assert sorted_services[3]['name'] == 'four'
+
+ def test_sort_service_dicts_circular_imports(self):
+ services = [
+ {
+ 'links': ['redis'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'redis',
+ 'links': ['web']
+ },
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'redis' in exc.exconly()
+ assert 'web' in exc.exconly()
+
+ def test_sort_service_dicts_circular_imports_2(self):
+ services = [
+ {
+ 'links': ['postgres', 'redis'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'redis',
+ 'links': ['web']
+ },
+ {
+ 'name': 'postgres'
+ }
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'redis' in exc.exconly()
+ assert 'web' in exc.exconly()
+
+ def test_sort_service_dicts_circular_imports_3(self):
+ services = [
+ {
+ 'links': ['b'],
+ 'name': 'a'
+ },
+ {
+ 'name': 'b',
+ 'links': ['c']
+ },
+ {
+ 'name': 'c',
+ 'links': ['a']
+ }
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'a' in exc.exconly()
+ assert 'b' in exc.exconly()
+
+ def test_sort_service_dicts_self_imports(self):
+ services = [
+ {
+ 'links': ['web'],
+ 'name': 'web'
+ },
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'web' in exc.exconly()
+
+ def test_sort_service_dicts_depends_on_self(self):
+ services = [
+ {
+ 'depends_on': ['web'],
+ 'name': 'web'
+ },
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'A service can not depend on itself: web' in exc.exconly()
diff --git a/tests/unit/config/types_test.py b/tests/unit/config/types_test.py
new file mode 100644
index 00000000..3a43f727
--- /dev/null
+++ b/tests/unit/config/types_test.py
@@ -0,0 +1,235 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from compose.config.errors import ConfigurationError
+from compose.config.types import parse_extra_hosts
+from compose.config.types import ServicePort
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
+def test_parse_extra_hosts_list():
+ expected = {'www.example.com': '192.168.0.17'}
+ assert parse_extra_hosts(["www.example.com:192.168.0.17"]) == expected
+
+ expected = {'www.example.com': '192.168.0.17'}
+ assert parse_extra_hosts(["www.example.com: 192.168.0.17"]) == expected
+
+ assert parse_extra_hosts([
+ "www.example.com: 192.168.0.17",
+ "static.example.com:192.168.0.19",
+ "api.example.com: 192.168.0.18",
+ "v6.example.com: ::1"
+ ]) == {
+ 'www.example.com': '192.168.0.17',
+ 'static.example.com': '192.168.0.19',
+ 'api.example.com': '192.168.0.18',
+ 'v6.example.com': '::1'
+ }
+
+
+def test_parse_extra_hosts_dict():
+ assert parse_extra_hosts({
+ 'www.example.com': '192.168.0.17',
+ 'api.example.com': '192.168.0.18'
+ }) == {
+ 'www.example.com': '192.168.0.17',
+ 'api.example.com': '192.168.0.18'
+ }
+
+
+class TestServicePort(object):
+ def test_parse_dict(self):
+ data = {
+ 'target': 8000,
+ 'published': 8000,
+ 'protocol': 'udp',
+ 'mode': 'global',
+ }
+ ports = ServicePort.parse(data)
+ assert len(ports) == 1
+ assert ports[0].repr() == data
+
+ def test_parse_simple_target_port(self):
+ ports = ServicePort.parse(8000)
+ assert len(ports) == 1
+ assert ports[0].target == 8000
+
+ def test_parse_complete_port_definition(self):
+ port_def = '1.1.1.1:3000:3000/udp'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].repr() == {
+ 'target': 3000,
+ 'published': 3000,
+ 'external_ip': '1.1.1.1',
+ 'protocol': 'udp',
+ }
+ assert ports[0].legacy_repr() == port_def
+
+ def test_parse_ext_ip_no_published_port(self):
+ port_def = '1.1.1.1::3000'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].legacy_repr() == port_def + '/tcp'
+ assert ports[0].repr() == {
+ 'target': 3000,
+ 'external_ip': '1.1.1.1',
+ }
+
+ def test_repr_published_port_0(self):
+ port_def = '0:4000'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].legacy_repr() == port_def + '/tcp'
+
+ def test_parse_port_range(self):
+ ports = ServicePort.parse('25000-25001:4000-4001')
+ assert len(ports) == 2
+ reprs = [p.repr() for p in ports]
+ assert {
+ 'target': 4000,
+ 'published': 25000
+ } in reprs
+ assert {
+ 'target': 4001,
+ 'published': 25001
+ } in reprs
+
+ def test_parse_invalid_port(self):
+ port_def = '4000p'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
+
+class TestVolumeSpec(object):
+
+ def test_parse_volume_spec_only_one_path(self):
+ spec = VolumeSpec.parse('/the/volume')
+ assert spec == (None, '/the/volume', 'rw')
+
+ def test_parse_volume_spec_internal_and_external(self):
+ spec = VolumeSpec.parse('external:interval')
+ assert spec == ('external', 'interval', 'rw')
+
+ def test_parse_volume_spec_with_mode(self):
+ spec = VolumeSpec.parse('external:interval:ro')
+ assert spec == ('external', 'interval', 'ro')
+
+ spec = VolumeSpec.parse('external:interval:z')
+ assert spec == ('external', 'interval', 'z')
+
+ def test_parse_volume_spec_too_many_parts(self):
+ with pytest.raises(ConfigurationError) as exc:
+ VolumeSpec.parse('one:two:three:four')
+ assert 'has incorrect format' in exc.exconly()
+
+ def test_parse_volume_windows_absolute_path_normalized(self):
+ windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro"
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ "/c/Users/me/Documents/shiny/config",
+ "/opt/shiny/config",
+ "ro"
+ )
+
+ def test_parse_volume_windows_absolute_path_native(self):
+ windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro"
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ "c:\\Users\\me\\Documents\\shiny\\config",
+ "/opt/shiny/config",
+ "ro"
+ )
+
+ def test_parse_volume_windows_internal_path_normalized(self):
+ windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/c/Users/reimu/scarlet',
+ 'C:\\scarlet\\app',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_internal_path_native(self):
+ windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'C:\\Users\\reimu\\scarlet',
+ 'C:\\scarlet\\app',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_just_drives_normalized(self):
+ windows_path = 'E:\\:C:\\:ro'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/e/',
+ 'C:\\',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_just_drives_native(self):
+ windows_path = 'E:\\:C:\\:ro'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'E:\\',
+ 'C:\\',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_mixed_notations_normalized(self):
+ windows_path = 'C:\\Foo:/root/foo'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/c/Foo',
+ '/root/foo',
+ 'rw'
+ )
+
+ def test_parse_volume_windows_mixed_notations_native(self):
+ windows_path = 'C:\\Foo:/root/foo'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'C:\\Foo',
+ '/root/foo',
+ 'rw'
+ )
+
+
+class TestVolumesFromSpec(object):
+
+ services = ['servicea', 'serviceb']
+
+ def test_parse_v1_from_service(self):
+ volume_from = VolumeFromSpec.parse('servicea', self.services, V1)
+ assert volume_from == VolumeFromSpec('servicea', 'rw', 'service')
+
+ def test_parse_v1_from_container(self):
+ volume_from = VolumeFromSpec.parse('foo:ro', self.services, V1)
+ assert volume_from == VolumeFromSpec('foo', 'ro', 'container')
+
+ def test_parse_v1_invalid(self):
+ with pytest.raises(ConfigurationError):
+ VolumeFromSpec.parse('unknown:format:ro', self.services, V1)
+
+ def test_parse_v2_from_service(self):
+ volume_from = VolumeFromSpec.parse('servicea', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('servicea', 'rw', 'service')
+
+ def test_parse_v2_from_service_with_mode(self):
+ volume_from = VolumeFromSpec.parse('servicea:ro', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('servicea', 'ro', 'service')
+
+ def test_parse_v2_from_container(self):
+ volume_from = VolumeFromSpec.parse('container:foo', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('foo', 'rw', 'container')
+
+ def test_parse_v2_from_container_with_mode(self):
+ volume_from = VolumeFromSpec.parse('container:foo:ro', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('foo', 'ro', 'container')
+
+ def test_parse_v2_invalid_type(self):
+ with pytest.raises(ConfigurationError) as exc:
+ VolumeFromSpec.parse('bogus:foo:ro', self.services, V2_0)
+ assert "Unknown volumes_from type 'bogus'" in exc.exconly()
+
+ def test_parse_v2_invalid(self):
+ with pytest.raises(ConfigurationError):
+ VolumeFromSpec.parse('unknown:format:ro', self.services, V2_0)
diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py
new file mode 100644
index 00000000..04f43016
--- /dev/null
+++ b/tests/unit/container_test.py
@@ -0,0 +1,198 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+
+from .. import mock
+from .. import unittest
+from compose.container import Container
+from compose.container import get_container_name
+
+
+class ContainerTest(unittest.TestCase):
+
+ def setUp(self):
+ self.container_id = "abcabcabcbabc12345"
+ self.container_dict = {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Command": "top",
+ "Created": 1387384730,
+ "Status": "Up 8 seconds",
+ "Ports": None,
+ "SizeRw": 0,
+ "SizeRootFs": 0,
+ "Names": ["/composetest_db_1", "/composetest_web_1/db"],
+ "NetworkSettings": {
+ "Ports": {},
+ },
+ "Config": {
+ "Labels": {
+ "com.docker.compose.project": "composetest",
+ "com.docker.compose.service": "web",
+ "com.docker.compose.container-number": 7,
+ },
+ }
+ }
+
+ def test_from_ps(self):
+ container = Container.from_ps(None,
+ self.container_dict,
+ has_been_inspected=True)
+ self.assertEqual(
+ container.dictionary,
+ {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Name": "/composetest_db_1",
+ })
+
+ def test_from_ps_prefixed(self):
+ self.container_dict['Names'] = [
+ '/swarm-host-1' + n for n in self.container_dict['Names']
+ ]
+
+ container = Container.from_ps(
+ None,
+ self.container_dict,
+ has_been_inspected=True)
+ self.assertEqual(container.dictionary, {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Name": "/composetest_db_1",
+ })
+
+ def test_environment(self):
+ container = Container(None, {
+ 'Id': 'abc',
+ 'Config': {
+ 'Env': [
+ 'FOO=BAR',
+ 'BAZ=DOGE',
+ ]
+ }
+ }, has_been_inspected=True)
+ self.assertEqual(container.environment, {
+ 'FOO': 'BAR',
+ 'BAZ': 'DOGE',
+ })
+
+ def test_number(self):
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.number, 7)
+
+ def test_name(self):
+ container = Container.from_ps(None,
+ self.container_dict,
+ has_been_inspected=True)
+ self.assertEqual(container.name, "composetest_db_1")
+
+ def test_name_without_project(self):
+ self.container_dict['Name'] = "/composetest_web_7"
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.name_without_project, "web_7")
+
+ def test_name_without_project_custom_container_name(self):
+ self.container_dict['Name'] = "/custom_name_of_container"
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.name_without_project, "custom_name_of_container")
+
+ def test_inspect_if_not_inspected(self):
+ mock_client = mock.create_autospec(docker.APIClient)
+ container = Container(mock_client, dict(Id="the_id"))
+
+ container.inspect_if_not_inspected()
+ mock_client.inspect_container.assert_called_once_with("the_id")
+ self.assertEqual(container.dictionary,
+ mock_client.inspect_container.return_value)
+ self.assertTrue(container.has_been_inspected)
+
+ container.inspect_if_not_inspected()
+ self.assertEqual(mock_client.inspect_container.call_count, 1)
+
+ def test_human_readable_ports_none(self):
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.human_readable_ports, '')
+
+ def test_human_readable_ports_public_and_private(self):
+ self.container_dict['NetworkSettings']['Ports'].update({
+ "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
+ "45453/tcp": [],
+ })
+ container = Container(None, self.container_dict, has_been_inspected=True)
+
+ expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
+ self.assertEqual(container.human_readable_ports, expected)
+
+ def test_get_local_port(self):
+ self.container_dict['NetworkSettings']['Ports'].update({
+ "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
+ })
+ container = Container(None, self.container_dict, has_been_inspected=True)
+
+ self.assertEqual(
+ container.get_local_port(45454, protocol='tcp'),
+ '0.0.0.0:49197')
+
+ def test_get(self):
+ container = Container(None, {
+ "Status": "Up 8 seconds",
+ "HostConfig": {
+ "VolumesFrom": ["volume_id"]
+ },
+ }, has_been_inspected=True)
+
+ self.assertEqual(container.get('Status'), "Up 8 seconds")
+ self.assertEqual(container.get('HostConfig.VolumesFrom'), ["volume_id"])
+ self.assertEqual(container.get('Foo.Bar.DoesNotExist'), None)
+
+ def test_short_id(self):
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ assert container.short_id == self.container_id[:12]
+
+ def test_has_api_logs(self):
+ container_dict = {
+ 'HostConfig': {
+ 'LogConfig': {
+ 'Type': 'json-file'
+ }
+ }
+ }
+
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is True
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'none'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'syslog'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'journald'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is True
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'foobar'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+
+class GetContainerNameTestCase(unittest.TestCase):
+
+ def test_get_container_name(self):
+ self.assertIsNone(get_container_name({}))
+ self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1')
+ self.assertEqual(
+ get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}),
+ 'myproject_db_1')
+ self.assertEqual(
+ get_container_name({
+ 'Names': [
+ '/swarm-host-1/myproject_db_1',
+ '/swarm-host-1/myproject_web_1/db'
+ ]
+ }),
+ 'myproject_db_1'
+ )
diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py
new file mode 100644
index 00000000..b27339af
--- /dev/null
+++ b/tests/unit/network_test.py
@@ -0,0 +1,161 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from .. import mock
+from .. import unittest
+from compose.network import check_remote_network_config
+from compose.network import Network
+from compose.network import NetworkConfigChangedError
+
+
+class NetworkTest(unittest.TestCase):
+ def test_check_remote_network_config_success(self):
+ options = {'com.docker.network.driver.foo': 'bar'}
+ ipam_config = {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.0.0.1/16', },
+ {
+ 'subnet': '156.0.0.1/25',
+ 'gateway': '156.0.0.1',
+ 'aux_addresses': ['11.0.0.1', '24.25.26.27'],
+ 'ip_range': '156.0.0.1-254'
+ }
+ ]
+ }
+ labels = {
+ 'com.project.tests.istest': 'true',
+ 'com.project.sound.track': 'way out of here',
+ }
+ remote_labels = labels.copy()
+ remote_labels.update({
+ 'com.docker.compose.project': 'compose_test',
+ 'com.docker.compose.network': 'net1',
+ })
+ net = Network(
+ None, 'compose_test', 'net1', 'bridge',
+ options, enable_ipv6=True, ipam=ipam_config,
+ labels=labels
+ )
+ check_remote_network_config(
+ {
+ 'Driver': 'bridge',
+ 'Options': options,
+ 'EnableIPv6': True,
+ 'Internal': False,
+ 'Attachable': True,
+ 'IPAM': {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '156.0.0.1/25',
+ 'Gateway': '156.0.0.1',
+ 'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
+ 'IPRange': '156.0.0.1-254'
+ }, {
+ 'Subnet': '172.0.0.1/16',
+ 'Gateway': '172.0.0.1'
+ }],
+ },
+ 'Labels': remote_labels
+ },
+ net
+ )
+
+ def test_check_remote_network_config_whitelist(self):
+ options = {'com.docker.network.driver.foo': 'bar'}
+ remote_options = {
+ 'com.docker.network.driver.overlay.vxlanid_list': '257',
+ 'com.docker.network.driver.foo': 'bar',
+ 'com.docker.network.windowsshim.hnsid': 'aac3fd4887daaec1e3b',
+ }
+ net = Network(
+ None, 'compose_test', 'net1', 'overlay',
+ options
+ )
+ check_remote_network_config(
+ {'Driver': 'overlay', 'Options': remote_options}, net
+ )
+
+ def test_check_remote_network_config_driver_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ with pytest.raises(NetworkConfigChangedError) as e:
+ check_remote_network_config(
+ {'Driver': 'bridge', 'Options': {}}, net
+ )
+
+ assert 'driver has changed' in str(e.value)
+
+ def test_check_remote_network_config_options_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ with pytest.raises(NetworkConfigChangedError) as e:
+ check_remote_network_config({'Driver': 'overlay', 'Options': {
+ 'com.docker.network.driver.foo': 'baz'
+ }}, net)
+
+ assert 'option "com.docker.network.driver.foo" has changed' in str(e.value)
+
+ def test_check_remote_network_config_null_remote(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ check_remote_network_config(
+ {'Driver': 'overlay', 'Options': None}, net
+ )
+
+ def test_check_remote_network_config_null_remote_ipam_options(self):
+ ipam_config = {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.0.0.1/16', },
+ {
+ 'subnet': '156.0.0.1/25',
+ 'gateway': '156.0.0.1',
+ 'aux_addresses': ['11.0.0.1', '24.25.26.27'],
+ 'ip_range': '156.0.0.1-254'
+ }
+ ]
+ }
+ net = Network(
+ None, 'compose_test', 'net1', 'bridge', ipam=ipam_config,
+ )
+
+ check_remote_network_config(
+ {
+ 'Driver': 'bridge',
+ 'Attachable': True,
+ 'IPAM': {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '156.0.0.1/25',
+ 'Gateway': '156.0.0.1',
+ 'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
+ 'IPRange': '156.0.0.1-254'
+ }, {
+ 'Subnet': '172.0.0.1/16',
+ 'Gateway': '172.0.0.1'
+ }],
+ 'Options': None
+ },
+ },
+ net
+ )
+
+ def test_check_remote_network_labels_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay', labels={
+ 'com.project.touhou.character': 'sakuya.izayoi'
+ })
+ remote = {
+ 'Driver': 'overlay',
+ 'Options': None,
+ 'Labels': {
+ 'com.docker.compose.network': 'net1',
+ 'com.docker.compose.project': 'compose_test',
+ 'com.project.touhou.character': 'marisa.kirisame',
+ }
+ }
+ with mock.patch('compose.network.log') as mock_log:
+ check_remote_network_config(remote, net)
+
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warn.mock_calls[0]
+ assert 'label "com.project.touhou.character" has changed' in args[0]
diff --git a/tests/unit/parallel_test.py b/tests/unit/parallel_test.py
new file mode 100644
index 00000000..3a60f01a
--- /dev/null
+++ b/tests/unit/parallel_test.py
@@ -0,0 +1,163 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from threading import Lock
+
+import six
+from docker.errors import APIError
+
+from compose.parallel import parallel_execute
+from compose.parallel import parallel_execute_iter
+from compose.parallel import ParallelStreamWriter
+from compose.parallel import UpstreamError
+
+
+web = 'web'
+db = 'db'
+data_volume = 'data_volume'
+cache = 'cache'
+
+objects = [web, db, data_volume, cache]
+
+deps = {
+ web: [db, cache],
+ db: [data_volume],
+ data_volume: [],
+ cache: [],
+}
+
+
+def get_deps(obj):
+ return [(dep, None) for dep in deps[obj]]
+
+
+def test_parallel_execute():
+ results, errors = parallel_execute(
+ objects=[1, 2, 3, 4, 5],
+ func=lambda x: x * 2,
+ get_name=six.text_type,
+ msg="Doubling",
+ )
+
+ assert sorted(results) == [2, 4, 6, 8, 10]
+ assert errors == {}
+
+
+def test_parallel_execute_with_limit():
+ limit = 1
+ tasks = 20
+ lock = Lock()
+
+ def f(obj):
+ locked = lock.acquire(False)
+ # we should always get the lock because we're the only thread running
+ assert locked
+ lock.release()
+ return None
+
+ results, errors = parallel_execute(
+ objects=list(range(tasks)),
+ func=f,
+ get_name=six.text_type,
+ msg="Testing",
+ limit=limit,
+ )
+
+ assert results == tasks * [None]
+ assert errors == {}
+
+
+def test_parallel_execute_with_deps():
+ log = []
+
+ def process(x):
+ log.append(x)
+
+ parallel_execute(
+ objects=objects,
+ func=process,
+ get_name=lambda obj: obj,
+ msg="Processing",
+ get_deps=get_deps,
+ )
+
+ assert sorted(log) == sorted(objects)
+
+ assert log.index(data_volume) < log.index(db)
+ assert log.index(db) < log.index(web)
+ assert log.index(cache) < log.index(web)
+
+
+def test_parallel_execute_with_upstream_errors():
+ log = []
+
+ def process(x):
+ if x is data_volume:
+ raise APIError(None, None, "Something went wrong")
+ log.append(x)
+
+ parallel_execute(
+ objects=objects,
+ func=process,
+ get_name=lambda obj: obj,
+ msg="Processing",
+ get_deps=get_deps,
+ )
+
+ assert log == [cache]
+
+ events = [
+ (obj, result, type(exception))
+ for obj, result, exception
+ in parallel_execute_iter(objects, process, get_deps, None)
+ ]
+
+ assert (cache, None, type(None)) in events
+ assert (data_volume, None, APIError) in events
+ assert (db, None, UpstreamError) in events
+ assert (web, None, UpstreamError) in events
+
+
+def test_parallel_execute_alignment(capsys):
+ results, errors = parallel_execute(
+ objects=["short", "a very long name"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Aligning",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ a, b = err.split('\n')[:2]
+ assert a.index('...') == b.index('...')
+
+
+def test_parallel_execute_ansi(capsys):
+ ParallelStreamWriter.set_noansi(value=False)
+ results, errors = parallel_execute(
+ objects=["something", "something more"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Control characters",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ assert "\x1b" in err
+
+
+def test_parallel_execute_noansi(capsys):
+ ParallelStreamWriter.set_noansi()
+ results, errors = parallel_execute(
+ objects=["something", "something more"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Control characters",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ assert "\x1b" not in err
diff --git a/tests/unit/progress_stream_test.py b/tests/unit/progress_stream_test.py
new file mode 100644
index 00000000..c0cb906d
--- /dev/null
+++ b/tests/unit/progress_stream_test.py
@@ -0,0 +1,87 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from six import StringIO
+
+from compose import progress_stream
+from tests import unittest
+
+
+class ProgressStreamTestCase(unittest.TestCase):
+ def test_stream_output(self):
+ output = [
+ b'{"status": "Downloading", "progressDetail": {"current": '
+ b'31019763, "start": 1413653874, "total": 62763875}, '
+ b'"progress": "..."}',
+ ]
+ events = progress_stream.stream_output(output, StringIO())
+ self.assertEqual(len(events), 1)
+
+ def test_stream_output_div_zero(self):
+ output = [
+ b'{"status": "Downloading", "progressDetail": {"current": '
+ b'0, "start": 1413653874, "total": 0}, '
+ b'"progress": "..."}',
+ ]
+ events = progress_stream.stream_output(output, StringIO())
+ self.assertEqual(len(events), 1)
+
+ def test_stream_output_null_total(self):
+ output = [
+ b'{"status": "Downloading", "progressDetail": {"current": '
+ b'0, "start": 1413653874, "total": null}, '
+ b'"progress": "..."}',
+ ]
+ events = progress_stream.stream_output(output, StringIO())
+ self.assertEqual(len(events), 1)
+
+ def test_stream_output_progress_event_tty(self):
+ events = [
+ b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
+ ]
+
+ class TTYStringIO(StringIO):
+ def isatty(self):
+ return True
+
+ output = TTYStringIO()
+ events = progress_stream.stream_output(events, output)
+ self.assertTrue(len(output.getvalue()) > 0)
+
+ def test_stream_output_progress_event_no_tty(self):
+ events = [
+ b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
+ ]
+ output = StringIO()
+
+ events = progress_stream.stream_output(events, output)
+ self.assertEqual(len(output.getvalue()), 0)
+
+ def test_stream_output_no_progress_event_no_tty(self):
+ events = [
+ b'{"status": "Pulling from library/xy", "id": "latest"}'
+ ]
+ output = StringIO()
+
+ events = progress_stream.stream_output(events, output)
+ self.assertTrue(len(output.getvalue()) > 0)
+
+
+def test_get_digest_from_push():
+ digest = "sha256:abcd"
+ events = [
+ {"status": "..."},
+ {"status": "..."},
+ {"progressDetail": {}, "aux": {"Digest": digest}},
+ ]
+ assert progress_stream.get_digest_from_push(events) == digest
+
+
+def test_get_digest_from_pull():
+ digest = "sha256:abcd"
+ events = [
+ {"status": "..."},
+ {"status": "..."},
+ {"status": "Digest: %s" % digest},
+ ]
+ assert progress_stream.get_digest_from_pull(events) == digest
diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py
new file mode 100644
index 00000000..e5f1a175
--- /dev/null
+++ b/tests/unit/project_test.py
@@ -0,0 +1,570 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import datetime
+
+import docker
+from docker.errors import NotFound
+
+from .. import mock
+from .. import unittest
+from compose.config.config import Config
+from compose.config.types import VolumeFromSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import LABEL_SERVICE
+from compose.container import Container
+from compose.project import Project
+from compose.service import ImageType
+from compose.service import Service
+
+
+class ProjectTest(unittest.TestCase):
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_from_config_v1(self):
+ config = Config(
+ version=V1,
+ services=[
+ {
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ },
+ {
+ 'name': 'db',
+ 'image': 'busybox:latest',
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config,
+ client=None,
+ )
+ self.assertEqual(len(project.services), 2)
+ self.assertEqual(project.get_service('web').name, 'web')
+ self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
+ self.assertEqual(project.get_service('db').name, 'db')
+ self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
+ self.assertFalse(project.networks.use_networking)
+
+ def test_from_config_v2(self):
+ config = Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ },
+ {
+ 'name': 'db',
+ 'image': 'busybox:latest',
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ )
+ project = Project.from_config('composetest', config, None)
+ self.assertEqual(len(project.services), 2)
+ self.assertTrue(project.networks.use_networking)
+
+ def test_get_service(self):
+ web = Service(
+ project='composetest',
+ name='web',
+ client=None,
+ image="busybox:latest",
+ )
+ project = Project('test', [web], None)
+ self.assertEqual(project.get_service('web'), web)
+
+ def test_get_services_returns_all_services_without_args(self):
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ )
+ console = Service(
+ project='composetest',
+ name='console',
+ image='foo',
+ )
+ project = Project('test', [web, console], None)
+ self.assertEqual(project.get_services(), [web, console])
+
+ def test_get_services_returns_listed_services_with_args(self):
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ )
+ console = Service(
+ project='composetest',
+ name='console',
+ image='foo',
+ )
+ project = Project('test', [web, console], None)
+ self.assertEqual(project.get_services(['console']), [console])
+
+ def test_get_services_with_include_links(self):
+ db = Service(
+ project='composetest',
+ name='db',
+ image='foo',
+ )
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ links=[(db, 'database')]
+ )
+ cache = Service(
+ project='composetest',
+ name='cache',
+ image='foo'
+ )
+ console = Service(
+ project='composetest',
+ name='console',
+ image='foo',
+ links=[(web, 'web')]
+ )
+ project = Project('test', [web, db, cache, console], None)
+ self.assertEqual(
+ project.get_services(['console'], include_deps=True),
+ [db, web, console]
+ )
+
+ def test_get_services_removes_duplicates_following_links(self):
+ db = Service(
+ project='composetest',
+ name='db',
+ image='foo',
+ )
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ links=[(db, 'database')]
+ )
+ project = Project('test', [web, db], None)
+ self.assertEqual(
+ project.get_services(['web', 'db'], include_deps=True),
+ [db, web]
+ )
+
+ def test_use_volumes_from_container(self):
+ container_id = 'aabbccddee'
+ container_dict = dict(Name='aaa', Id=container_id)
+ self.mock_client.inspect_container.return_value = container_dict
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'volumes_from': [VolumeFromSpec('aaa', 'rw', 'container')]
+ }],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ assert project.get_service('test')._get_volumes_from() == [container_id + ":rw"]
+
+ def test_use_volumes_from_service_no_container(self):
+ container_name = 'test_vol_1'
+ self.mock_client.containers.return_value = [
+ {
+ "Name": container_name,
+ "Names": [container_name],
+ "Id": container_name,
+ "Image": 'busybox:latest'
+ }
+ ]
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'vol',
+ 'image': 'busybox:latest'
+ },
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
+ }
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ assert project.get_service('test')._get_volumes_from() == [container_name + ":rw"]
+
+ def test_use_volumes_from_service_container(self):
+ container_ids = ['aabbccddee', '12345']
+
+ project = Project.from_config(
+ name='test',
+ client=None,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'vol',
+ 'image': 'busybox:latest'
+ },
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
+ }
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ with mock.patch.object(Service, 'containers') as mock_return:
+ mock_return.return_value = [
+ mock.Mock(id=container_id, spec=Container)
+ for container_id in container_ids]
+ assert (
+ project.get_service('test')._get_volumes_from() ==
+ [container_ids[0] + ':rw']
+ )
+
+ def test_events(self):
+ services = [Service(name='web'), Service(name='db')]
+ project = Project('test', services, self.mock_client)
+ self.mock_client.events.return_value = iter([
+ {
+ 'status': 'create',
+ 'from': 'example/image',
+ 'id': 'abcde',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000002000,
+ },
+ {
+ 'status': 'attach',
+ 'from': 'example/image',
+ 'id': 'abcde',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000003000,
+ },
+ {
+ 'status': 'create',
+ 'from': 'example/other',
+ 'id': 'bdbdbd',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000005000,
+ },
+ {
+ 'status': 'create',
+ 'from': 'example/db',
+ 'id': 'ababa',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000004000,
+ },
+ {
+ 'status': 'destroy',
+ 'from': 'example/db',
+ 'id': 'eeeee',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000004000,
+ },
+ ])
+
+ def dt_with_microseconds(dt, us):
+ return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)
+
+ def get_container(cid):
+ if cid == 'eeeee':
+ raise NotFound(None, None, "oops")
+ if cid == 'abcde':
+ name = 'web'
+ labels = {LABEL_SERVICE: name}
+ elif cid == 'ababa':
+ name = 'db'
+ labels = {LABEL_SERVICE: name}
+ else:
+ labels = {}
+ name = ''
+ return {
+ 'Id': cid,
+ 'Config': {'Labels': labels},
+ 'Name': '/project_%s_1' % name,
+ }
+
+ self.mock_client.inspect_container.side_effect = get_container
+
+ events = project.events()
+
+ events_list = list(events)
+ # Assert the return value is a generator
+ assert not list(events)
+ assert events_list == [
+ {
+ 'type': 'container',
+ 'service': 'web',
+ 'action': 'create',
+ 'id': 'abcde',
+ 'attributes': {
+ 'name': 'project_web_1',
+ 'image': 'example/image',
+ },
+ 'time': dt_with_microseconds(1420092061, 2),
+ 'container': Container(None, {'Id': 'abcde'}),
+ },
+ {
+ 'type': 'container',
+ 'service': 'web',
+ 'action': 'attach',
+ 'id': 'abcde',
+ 'attributes': {
+ 'name': 'project_web_1',
+ 'image': 'example/image',
+ },
+ 'time': dt_with_microseconds(1420092061, 3),
+ 'container': Container(None, {'Id': 'abcde'}),
+ },
+ {
+ 'type': 'container',
+ 'service': 'db',
+ 'action': 'create',
+ 'id': 'ababa',
+ 'attributes': {
+ 'name': 'project_db_1',
+ 'image': 'example/db',
+ },
+ 'time': dt_with_microseconds(1420092061, 4),
+ 'container': Container(None, {'Id': 'ababa'}),
+ },
+ ]
+
+ def test_net_unset(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V1,
+ services=[
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ }
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ service = project.get_service('test')
+ self.assertEqual(service.network_mode.id, None)
+ self.assertNotIn('NetworkMode', service._get_container_host_config({}))
+
+ def test_use_net_from_container(self):
+ container_id = 'aabbccddee'
+ container_dict = dict(Name='aaa', Id=container_id)
+ self.mock_client.inspect_container.return_value = container_dict
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'network_mode': 'container:aaa'
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ service = project.get_service('test')
+ self.assertEqual(service.network_mode.mode, 'container:' + container_id)
+
+ def test_use_net_from_service(self):
+ container_name = 'test_aaa_1'
+ self.mock_client.containers.return_value = [
+ {
+ "Name": container_name,
+ "Names": [container_name],
+ "Id": container_name,
+ "Image": 'busybox:latest'
+ }
+ ]
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'aaa',
+ 'image': 'busybox:latest'
+ },
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'network_mode': 'service:aaa'
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ service = project.get_service('test')
+ self.assertEqual(service.network_mode.mode, 'container:' + container_name)
+
+ def test_uses_default_network_true(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'foo',
+ 'image': 'busybox:latest'
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ assert 'default' in project.networks.networks
+
+ def test_uses_default_network_false(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'foo',
+ 'image': 'busybox:latest',
+ 'networks': {'custom': None}
+ },
+ ],
+ networks={'custom': {}},
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ assert 'default' not in project.networks.networks
+
+ def test_container_without_name(self):
+ self.mock_client.containers.return_value = [
+ {'Image': 'busybox:latest', 'Id': '1', 'Name': '1'},
+ {'Image': 'busybox:latest', 'Id': '2', 'Name': None},
+ {'Image': 'busybox:latest', 'Id': '3'},
+ ]
+ self.mock_client.inspect_container.return_value = {
+ 'Id': '1',
+ 'Config': {
+ 'Labels': {
+ LABEL_SERVICE: 'web',
+ },
+ },
+ }
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ }],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ self.assertEqual([c.id for c in project.containers()], ['1'])
+
+ def test_down_with_no_resources(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ }],
+ networks={'default': {}},
+ volumes={'data': {}},
+ secrets=None,
+ configs=None,
+ ),
+ )
+ self.mock_client.remove_network.side_effect = NotFound(None, None, 'oops')
+ self.mock_client.remove_volume.side_effect = NotFound(None, None, 'oops')
+
+ project.down(ImageType.all, True)
+ self.mock_client.remove_image.assert_called_once_with("busybox:latest")
+
+ def test_warning_in_swarm_mode(self):
+ self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.up()
+ assert fake_log.warn.call_count == 1
+
+ def test_no_warning_on_stop(self):
+ self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.stop()
+ assert fake_log.warn.call_count == 0
+
+ def test_no_warning_in_normal_mode(self):
+ self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'inactive'}}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.up()
+ assert fake_log.warn.call_count == 0
+
+ def test_no_warning_with_no_swarm_info(self):
+ self.mock_client.info.return_value = {}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.up()
+ assert fake_log.warn.call_count == 0
diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py
new file mode 100644
index 00000000..7d61807b
--- /dev/null
+++ b/tests/unit/service_test.py
@@ -0,0 +1,1146 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+import pytest
+from docker.errors import APIError
+
+from .. import mock
+from .. import unittest
+from compose.config.errors import DependencyError
+from compose.config.types import ServicePort
+from compose.config.types import ServiceSecret
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import LABEL_CONFIG_HASH
+from compose.const import LABEL_ONE_OFF
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_SERVICE
+from compose.const import SECRETS_PATH
+from compose.container import Container
+from compose.project import OneOffFilter
+from compose.service import build_ulimits
+from compose.service import build_volume_binding
+from compose.service import BuildAction
+from compose.service import ContainerNetworkMode
+from compose.service import formatted_ports
+from compose.service import get_container_data_volumes
+from compose.service import ImageType
+from compose.service import merge_volume_bindings
+from compose.service import NeedsBuildError
+from compose.service import NetworkMode
+from compose.service import NoSuchImageError
+from compose.service import parse_repository_tag
+from compose.service import Service
+from compose.service import ServiceNetworkMode
+from compose.service import warn_on_masked_volume
+
+
+class ServiceTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_containers(self):
+ service = Service('db', self.mock_client, 'myproject', image='foo')
+ self.mock_client.containers.return_value = []
+ self.assertEqual(list(service.containers()), [])
+
+ def test_containers_with_containers(self):
+ self.mock_client.containers.return_value = [
+ dict(Name=str(i), Image='foo', Id=i) for i in range(3)
+ ]
+ service = Service('db', self.mock_client, 'myproject', image='foo')
+ self.assertEqual([c.id for c in service.containers()], list(range(3)))
+
+ expected_labels = [
+ '{0}=myproject'.format(LABEL_PROJECT),
+ '{0}=db'.format(LABEL_SERVICE),
+ '{0}=False'.format(LABEL_ONE_OFF),
+ ]
+
+ self.mock_client.containers.assert_called_once_with(
+ all=False,
+ filters={'label': expected_labels})
+
+ def test_container_without_name(self):
+ self.mock_client.containers.return_value = [
+ {'Image': 'foo', 'Id': '1', 'Name': '1'},
+ {'Image': 'foo', 'Id': '2', 'Name': None},
+ {'Image': 'foo', 'Id': '3'},
+ ]
+ service = Service('db', self.mock_client, 'myproject', image='foo')
+
+ self.assertEqual([c.id for c in service.containers()], ['1'])
+ self.assertEqual(service._next_container_number(), 2)
+ self.assertEqual(service.get_container(1).id, '1')
+
+ def test_get_volumes_from_container(self):
+ container_id = 'aabbccddee'
+ service = Service(
+ 'test',
+ image='foo',
+ volumes_from=[
+ VolumeFromSpec(
+ mock.Mock(id=container_id, spec=Container),
+ 'rw',
+ 'container')])
+
+ self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+
+ def test_get_volumes_from_container_read_only(self):
+ container_id = 'aabbccddee'
+ service = Service(
+ 'test',
+ image='foo',
+ volumes_from=[
+ VolumeFromSpec(
+ mock.Mock(id=container_id, spec=Container),
+ 'ro',
+ 'container')])
+
+ self.assertEqual(service._get_volumes_from(), [container_id + ':ro'])
+
+ def test_get_volumes_from_service_container_exists(self):
+ container_ids = ['aabbccddee', '12345']
+ from_service = mock.create_autospec(Service)
+ from_service.containers.return_value = [
+ mock.Mock(id=container_id, spec=Container)
+ for container_id in container_ids
+ ]
+ service = Service(
+ 'test',
+ volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')],
+ image='foo')
+
+ self.assertEqual(service._get_volumes_from(), [container_ids[0] + ":rw"])
+
+ def test_get_volumes_from_service_container_exists_with_flags(self):
+ for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']:
+ container_ids = ['aabbccddee:' + mode, '12345:' + mode]
+ from_service = mock.create_autospec(Service)
+ from_service.containers.return_value = [
+ mock.Mock(id=container_id.split(':')[0], spec=Container)
+ for container_id in container_ids
+ ]
+ service = Service(
+ 'test',
+ volumes_from=[VolumeFromSpec(from_service, mode, 'service')],
+ image='foo')
+
+ self.assertEqual(service._get_volumes_from(), [container_ids[0]])
+
+ def test_get_volumes_from_service_no_container(self):
+ container_id = 'abababab'
+ from_service = mock.create_autospec(Service)
+ from_service.containers.return_value = []
+ from_service.create_container.return_value = mock.Mock(
+ id=container_id,
+ spec=Container)
+ service = Service(
+ 'test',
+ image='foo',
+ volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')])
+
+ self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+ from_service.create_container.assert_called_once_with()
+
+ def test_split_domainname_none(self):
+ service = Service('foo', image='foo', hostname='name', client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name', 'hostname')
+ self.assertFalse('domainname' in opts, 'domainname')
+
+ def test_memory_swap_limit(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ mem_limit=1000000000,
+ memswap_limit=2000000000)
+ service._get_container_create_options({'some': 'overrides'}, 1)
+
+ self.assertTrue(self.mock_client.create_host_config.called)
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['mem_limit'],
+ 1000000000
+ )
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['memswap_limit'],
+ 2000000000
+ )
+
+ def test_self_reference_external_link(self):
+ service = Service(
+ name='foo',
+ external_links=['default_foo_1']
+ )
+ with self.assertRaises(DependencyError):
+ service.get_container_name(1)
+
+ def test_mem_reservation(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ mem_reservation='512m'
+ )
+ service._get_container_create_options({'some': 'overrides'}, 1)
+ assert self.mock_client.create_host_config.called is True
+ assert self.mock_client.create_host_config.call_args[1]['mem_reservation'] == '512m'
+
+ def test_cgroup_parent(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ cgroup_parent='test')
+ service._get_container_create_options({'some': 'overrides'}, 1)
+
+ self.assertTrue(self.mock_client.create_host_config.called)
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['cgroup_parent'],
+ 'test'
+ )
+
+ def test_log_opt(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ log_opt = {'syslog-address': 'tcp://192.168.0.42:123'}
+ logging = {'driver': 'syslog', 'options': log_opt}
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ log_driver='syslog',
+ logging=logging)
+ service._get_container_create_options({'some': 'overrides'}, 1)
+
+ self.assertTrue(self.mock_client.create_host_config.called)
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['log_config'],
+ {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
+ )
+
+ def test_split_domainname_fqdn(self):
+ service = Service(
+ 'foo',
+ hostname='name.domain.tld',
+ image='foo',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name', 'hostname')
+ self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+
+ def test_split_domainname_both(self):
+ service = Service(
+ 'foo',
+ hostname='name',
+ image='foo',
+ domainname='domain.tld',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name', 'hostname')
+ self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+
+ def test_split_domainname_weird(self):
+ service = Service(
+ 'foo',
+ hostname='name.sub',
+ domainname='domain.tld',
+ image='foo',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
+ self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+
+ def test_no_default_hostname_when_not_using_networking(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ use_networking=False,
+ client=self.mock_client,
+ )
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertIsNone(opts.get('hostname'))
+
+ def test_get_container_create_options_with_name_option(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ client=self.mock_client,
+ container_name='foo1')
+ name = 'the_new_name'
+ opts = service._get_container_create_options(
+ {'name': name},
+ 1,
+ one_off=OneOffFilter.only)
+ self.assertEqual(opts['name'], name)
+
+ def test_get_container_create_options_does_not_mutate_options(self):
+ labels = {'thing': 'real'}
+ environment = {'also': 'real'}
+ service = Service(
+ 'foo',
+ image='foo',
+ labels=dict(labels),
+ client=self.mock_client,
+ environment=dict(environment),
+ )
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ prev_container = mock.Mock(
+ id='ababab',
+ image_config={'ContainerConfig': {}})
+ prev_container.get.return_value = None
+
+ opts = service._get_container_create_options(
+ {},
+ 1,
+ previous_container=prev_container)
+
+ self.assertEqual(service.options['labels'], labels)
+ self.assertEqual(service.options['environment'], environment)
+
+ self.assertEqual(
+ opts['labels'][LABEL_CONFIG_HASH],
+ '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa')
+ assert opts['environment'] == ['also=real']
+
+ def test_get_container_create_options_sets_affinity_with_binds(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ client=self.mock_client,
+ )
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ prev_container = mock.Mock(
+ id='ababab',
+ image_config={'ContainerConfig': {'Volumes': ['/data']}})
+
+ def container_get(key):
+ return {
+ 'Mounts': [
+ {
+ 'Destination': '/data',
+ 'Source': '/some/path',
+ 'Name': 'abab1234',
+ },
+ ]
+ }.get(key, None)
+
+ prev_container.get.side_effect = container_get
+
+ opts = service._get_container_create_options(
+ {},
+ 1,
+ previous_container=prev_container)
+
+ assert opts['environment'] == ['affinity:container==ababab']
+
+ def test_get_container_create_options_no_affinity_without_binds(self):
+ service = Service('foo', image='foo', client=self.mock_client)
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ prev_container = mock.Mock(
+ id='ababab',
+ image_config={'ContainerConfig': {}})
+ prev_container.get.return_value = None
+
+ opts = service._get_container_create_options(
+ {},
+ 1,
+ previous_container=prev_container)
+ assert opts['environment'] == []
+
+ def test_get_container_not_found(self):
+ self.mock_client.containers.return_value = []
+ service = Service('foo', client=self.mock_client, image='foo')
+
+ self.assertRaises(ValueError, service.get_container)
+
+ @mock.patch('compose.service.Container', autospec=True)
+ def test_get_container(self, mock_container_class):
+ container_dict = dict(Name='default_foo_2')
+ self.mock_client.containers.return_value = [container_dict]
+ service = Service('foo', image='foo', client=self.mock_client)
+
+ container = service.get_container(number=2)
+ self.assertEqual(container, mock_container_class.from_ps.return_value)
+ mock_container_class.from_ps.assert_called_once_with(
+ self.mock_client, container_dict)
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_pull_image(self, mock_log):
+ service = Service('foo', client=self.mock_client, image='someimage:sometag')
+ service.pull()
+ self.mock_client.pull.assert_called_once_with(
+ 'someimage',
+ tag='sometag',
+ stream=True)
+ mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...')
+
+ def test_pull_image_no_tag(self):
+ service = Service('foo', client=self.mock_client, image='ababab')
+ service.pull()
+ self.mock_client.pull.assert_called_once_with(
+ 'ababab',
+ tag='latest',
+ stream=True)
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_pull_image_digest(self, mock_log):
+ service = Service('foo', client=self.mock_client, image='someimage@sha256:1234')
+ service.pull()
+ self.mock_client.pull.assert_called_once_with(
+ 'someimage',
+ tag='sha256:1234',
+ stream=True)
+ mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...')
+
+ @mock.patch('compose.service.Container', autospec=True)
+ def test_recreate_container(self, _):
+ mock_container = mock.create_autospec(Container)
+ service = Service('foo', client=self.mock_client, image='someimage')
+ service.image = lambda: {'Id': 'abc123'}
+ new_container = service.recreate_container(mock_container)
+
+ mock_container.stop.assert_called_once_with(timeout=10)
+ mock_container.rename_to_tmp_name.assert_called_once_with()
+
+ new_container.start.assert_called_once_with()
+ mock_container.remove.assert_called_once_with()
+
+ @mock.patch('compose.service.Container', autospec=True)
+ def test_recreate_container_with_timeout(self, _):
+ mock_container = mock.create_autospec(Container)
+ self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
+ service = Service('foo', client=self.mock_client, image='someimage')
+ service.recreate_container(mock_container, timeout=1)
+
+ mock_container.stop.assert_called_once_with(timeout=1)
+
+ def test_parse_repository_tag(self):
+ self.assertEqual(parse_repository_tag("root"), ("root", "", ":"))
+ self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag", ":"))
+ self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":"))
+ self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":"))
+ self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":"))
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo:tag"),
+ ("url:5000/repo", "tag", ":"))
+ self.assertEqual(
+ parse_repository_tag("root@sha256:digest"),
+ ("root", "sha256:digest", "@"))
+ self.assertEqual(
+ parse_repository_tag("user/repo@sha256:digest"),
+ ("user/repo", "sha256:digest", "@"))
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo@sha256:digest"),
+ ("url:5000/repo", "sha256:digest", "@"))
+
+ def test_create_container(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.side_effect = [
+ NoSuchImageError,
+ {'Id': 'abc123'},
+ ]
+ self.mock_client.build.return_value = [
+ '{"stream": "Successfully built abcd"}',
+ ]
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ service.create_container()
+ assert mock_log.warn.called
+ _, args, _ = mock_log.warn.mock_calls[0]
+ assert 'was built because it did not already exist' in args[0]
+
+ self.mock_client.build.assert_called_once_with(
+ tag='default_foo',
+ dockerfile=None,
+ stream=True,
+ path='.',
+ pull=False,
+ forcerm=False,
+ nocache=False,
+ rm=True,
+ buildargs={},
+ labels=None,
+ cache_from=None,
+ network_mode=None,
+ target=None,
+ shmsize=None,
+ )
+
+ def test_ensure_image_exists_no_build(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
+
+ service.ensure_image_exists(do_build=BuildAction.skip)
+ assert not self.mock_client.build.called
+
+ def test_ensure_image_exists_no_build_but_needs_build(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.side_effect = NoSuchImageError
+ with pytest.raises(NeedsBuildError):
+ service.ensure_image_exists(do_build=BuildAction.skip)
+
+ def test_ensure_image_exists_force_build(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
+ self.mock_client.build.return_value = [
+ '{"stream": "Successfully built abcd"}',
+ ]
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ service.ensure_image_exists(do_build=BuildAction.force)
+
+ assert not mock_log.warn.called
+ self.mock_client.build.assert_called_once_with(
+ tag='default_foo',
+ dockerfile=None,
+ stream=True,
+ path='.',
+ pull=False,
+ forcerm=False,
+ nocache=False,
+ rm=True,
+ buildargs={},
+ labels=None,
+ cache_from=None,
+ network_mode=None,
+ target=None,
+ shmsize=None
+ )
+
+ def test_build_does_not_pull(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ service.build()
+
+ self.assertEqual(self.mock_client.build.call_count, 1)
+ self.assertFalse(self.mock_client.build.call_args[1]['pull'])
+
+ def test_build_with_override_build_args(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ build_args = {
+ 'arg1': 'arg1_new_value',
+ }
+ service = Service('foo', client=self.mock_client,
+ build={'context': '.', 'args': {'arg1': 'arg1', 'arg2': 'arg2'}})
+ service.build(build_args_override=build_args)
+
+ called_build_args = self.mock_client.build.call_args[1]['buildargs']
+
+ assert called_build_args['arg1'] == build_args['arg1']
+ assert called_build_args['arg2'] == 'arg2'
+
+ def test_config_dict(self):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ service = Service(
+ 'foo',
+ image='example.com/foo',
+ client=self.mock_client,
+ network_mode=ServiceNetworkMode(Service('other')),
+ networks={'default': None},
+ links=[(Service('one'), 'one')],
+ volumes_from=[VolumeFromSpec(Service('two'), 'rw', 'service')])
+
+ config_dict = service.config_dict()
+ expected = {
+ 'image_id': 'abcd',
+ 'options': {'image': 'example.com/foo'},
+ 'links': [('one', 'one')],
+ 'net': 'other',
+ 'networks': {'default': None},
+ 'volumes_from': [('two', 'rw')],
+ }
+ assert config_dict == expected
+
+ def test_config_dict_with_network_mode_from_container(self):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ container = Container(
+ self.mock_client,
+ {'Id': 'aaabbb', 'Name': '/foo_1'})
+ service = Service(
+ 'foo',
+ image='example.com/foo',
+ client=self.mock_client,
+ network_mode=ContainerNetworkMode(container))
+
+ config_dict = service.config_dict()
+ expected = {
+ 'image_id': 'abcd',
+ 'options': {'image': 'example.com/foo'},
+ 'links': [],
+ 'networks': {},
+ 'net': 'aaabbb',
+ 'volumes_from': [],
+ }
+ assert config_dict == expected
+
+ def test_remove_image_none(self):
+ web = Service('web', image='example', client=self.mock_client)
+ assert not web.remove_image(ImageType.none)
+ assert not self.mock_client.remove_image.called
+
+ def test_remove_image_local_with_image_name_doesnt_remove(self):
+ web = Service('web', image='example', client=self.mock_client)
+ assert not web.remove_image(ImageType.local)
+ assert not self.mock_client.remove_image.called
+
+ def test_remove_image_local_without_image_name_does_remove(self):
+ web = Service('web', build='.', client=self.mock_client)
+ assert web.remove_image(ImageType.local)
+ self.mock_client.remove_image.assert_called_once_with(web.image_name)
+
+ def test_remove_image_all_does_remove(self):
+ web = Service('web', image='example', client=self.mock_client)
+ assert web.remove_image(ImageType.all)
+ self.mock_client.remove_image.assert_called_once_with(web.image_name)
+
+ def test_remove_image_with_error(self):
+ self.mock_client.remove_image.side_effect = error = APIError(
+ message="testing",
+ response={},
+ explanation="Boom")
+
+ web = Service('web', image='example', client=self.mock_client)
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ assert not web.remove_image(ImageType.all)
+ mock_log.error.assert_called_once_with(
+ "Failed to remove image for service %s: %s", web.name, error)
+
+ def test_specifies_host_port_with_no_ports(self):
+ service = Service(
+ 'foo',
+ image='foo')
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_container_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["2000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["1000:2000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_specifies_host_port_with_host_ip_no_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1::2000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_ip_and_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1:1000:2000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_specifies_host_port_with_container_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["2000-3000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["1000-2000:2000-3000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_specifies_host_port_with_host_ip_no_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1::2000-3000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_ip_and_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1:1000-2000:2000-3000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_image_name_from_config(self):
+ image_name = 'example/web:latest'
+ service = Service('foo', image=image_name)
+ assert service.image_name == image_name
+
+ def test_image_name_default(self):
+ service = Service('foo', project='testing')
+ assert service.image_name == 'testing_foo'
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_only_log_warning_when_host_ports_clash(self, mock_log):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ name = 'foo'
+ service = Service(
+ name,
+ client=self.mock_client,
+ ports=["8080:80"])
+
+ service.scale(0)
+ self.assertFalse(mock_log.warn.called)
+
+ service.scale(1)
+ self.assertFalse(mock_log.warn.called)
+
+ service.scale(2)
+ mock_log.warn.assert_called_once_with(
+ 'The "{}" service specifies a port on the host. If multiple containers '
+ 'for this service are created on a single host, the port will clash.'.format(name))
+
+
class TestServiceNetwork(object):
    """Tests for connecting service containers to their networks."""

    # Renamed from ..._short_aliase_exists: 'aliase' was a typo.
    def test_connect_container_to_networks_short_alias_exists(self):
        """No disconnect/reconnect when the short-id alias is already set."""
        mock_client = mock.create_autospec(docker.APIClient)
        service = Service(
            'db',
            mock_client,
            'myproject',
            image='foo',
            networks={'project_default': {}})
        container = Container(
            None,
            {
                'Id': 'abcdef',
                'NetworkSettings': {
                    'Networks': {
                        'project_default': {
                            # The container's short id is already an alias.
                            'Aliases': ['analias', 'abcdef'],
                        },
                    },
                },
            },
            True)
        service.connect_container_to_networks(container)

        assert not mock_client.disconnect_container_from_network.call_count
        assert not mock_client.connect_container_to_network.call_count
+
+
def sort_by_name(dictionary_list):
    """Return a new list of the given dicts ordered by their 'name' entry."""
    return sorted(dictionary_list, key=lambda entry: entry['name'])
+
+
class BuildUlimitsTestCase(unittest.TestCase):
    """build_ulimits() normalises ulimit config into docker's list form."""

    def test_build_ulimits_with_dict(self):
        result = build_ulimits({
            'nofile': {'soft': 10000, 'hard': 20000},
            'nproc': {'soft': 65535, 'hard': 65535},
        })
        expected = [
            {'name': 'nofile', 'soft': 10000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535},
        ]
        assert sort_by_name(result) == sort_by_name(expected)

    def test_build_ulimits_with_ints(self):
        # A bare int sets both soft and hard to the same value.
        result = build_ulimits({'nofile': 20000, 'nproc': 65535})
        expected = [
            {'name': 'nofile', 'soft': 20000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535},
        ]
        assert sort_by_name(result) == sort_by_name(expected)

    def test_build_ulimits_with_integers_and_dicts(self):
        result = build_ulimits({
            'nproc': 65535,
            'nofile': {'soft': 10000, 'hard': 20000},
        })
        expected = [
            {'name': 'nofile', 'soft': 10000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535},
        ]
        assert sort_by_name(result) == sort_by_name(expected)
+
+
class NetTestCase(unittest.TestCase):
    """Tests for the NetworkMode / ContainerNetworkMode / ServiceNetworkMode helpers."""

    # Converted to bare asserts for consistency with the pytest-style
    # checks used elsewhere in this module.

    def test_network_mode(self):
        """A plain mode string is both the id and the mode."""
        network_mode = NetworkMode('host')
        assert network_mode.id == 'host'
        assert network_mode.mode == 'host'
        assert network_mode.service_name is None

    def test_network_mode_container(self):
        """A container-backed mode uses the container id."""
        container_id = 'abcd'
        network_mode = ContainerNetworkMode(Container(None, {'Id': container_id}))
        assert network_mode.id == container_id
        assert network_mode.mode == 'container:' + container_id
        assert network_mode.service_name is None

    def test_network_mode_service(self):
        """A service-backed mode resolves to the service's first container."""
        container_id = 'bbbb'
        service_name = 'web'
        mock_client = mock.create_autospec(docker.APIClient)
        mock_client.containers.return_value = [
            {'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
        ]

        service = Service(name=service_name, client=mock_client)
        network_mode = ServiceNetworkMode(service)

        assert network_mode.id == service_name
        assert network_mode.mode == 'container:' + container_id
        assert network_mode.service_name == service_name

    def test_network_mode_service_no_containers(self):
        """With no containers running, the mode is None but the id remains."""
        service_name = 'web'
        mock_client = mock.create_autospec(docker.APIClient)
        mock_client.containers.return_value = []

        service = Service(name=service_name, client=mock_client)
        network_mode = ServiceNetworkMode(service)

        assert network_mode.id == service_name
        assert network_mode.mode is None
        assert network_mode.service_name == service_name
+
+
class ServicePortsTest(unittest.TestCase):
    """formatted_ports() renders ServicePort tuples; strings pass through."""

    def test_formatted_ports(self):
        ports = [
            '3000',
            '0.0.0.0:4025-4030:23000-23005',
            ServicePort(6000, None, None, None, None),
            ServicePort(8080, 8080, None, None, None),
            ServicePort('20000', '20000', 'udp', 'ingress', None),
            ServicePort(30000, '30000', 'tcp', None, '127.0.0.1'),
        ]
        formatted = formatted_ports(ports)
        expected_entries = [
            ports[0],
            ports[1],
            '6000/tcp',
            '8080:8080/tcp',
            '20000:20000/udp',
            '127.0.0.1:30000:30000/tcp',
        ]
        for entry in expected_entries:
            assert entry in formatted
+
+
def build_mount(destination, source, mode='rw'):
    """Build a docker-inspect-style mount dict for use as a test fixture."""
    return {
        'Source': source,
        'Destination': destination,
        'Mode': mode,
    }
+
+
class ServiceVolumesTest(unittest.TestCase):
    """Unit tests for the volume-handling helpers in compose.service."""

    def setUp(self):
        # Autospecced client: calls to unknown APIClient methods fail fast.
        self.mock_client = mock.create_autospec(docker.APIClient)

    def test_build_volume_binding(self):
        """build_volume_binding maps a VolumeSpec to (internal_path, bind_string)."""
        binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
        assert binding == ('/inside', '/outside:/inside:rw')

    def test_get_container_data_volumes(self):
        """Only anonymous/named volumes still referenced by the config (or
        declared by the image) are reused from the previous container; host
        binds, dropped volumes, and tmpfs paths are not carried over."""
        options = [VolumeSpec.parse(v) for v in [
            '/host/volume:/host/volume:ro',
            '/new/volume',
            '/existing/volume',
            'named:/named/vol',
            '/dev/tmpfs'
        ]]

        # The image itself declares one volume (/mnt/image/data).
        self.mock_client.inspect_image.return_value = {
            'ContainerConfig': {
                'Volumes': {
                    '/mnt/image/data': {},
                }
            }
        }
        container = Container(self.mock_client, {
            'Image': 'ababab',
            'Mounts': [
                {
                    # Host bind (source == destination) -> not reused.
                    'Source': '/host/volume',
                    'Destination': '/host/volume',
                    'Mode': '',
                    'RW': True,
                    'Name': 'hostvolume',
                }, {
                    # Volume still present in the config -> reused.
                    'Source': '/var/lib/docker/aaaaaaaa',
                    'Destination': '/existing/volume',
                    'Mode': '',
                    'RW': True,
                    'Name': 'existingvolume',
                }, {
                    # Volume no longer in the config -> not reused.
                    'Source': '/var/lib/docker/bbbbbbbb',
                    'Destination': '/removed/volume',
                    'Mode': '',
                    'RW': True,
                    'Name': 'removedvolume',
                }, {
                    # Volume declared by the image -> reused.
                    'Source': '/var/lib/docker/cccccccc',
                    'Destination': '/mnt/image/data',
                    'Mode': '',
                    'RW': True,
                    'Name': 'imagedata',
                },
            ]
        }, has_been_inspected=True)

        expected = [
            VolumeSpec.parse('existingvolume:/existing/volume:rw'),
            VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
        ]

        # '/dev/tmpfs' is passed as a tmpfs path and must be excluded.
        volumes = get_container_data_volumes(container, options, ['/dev/tmpfs'])
        assert sorted(volumes) == sorted(expected)

    def test_merge_volume_bindings(self):
        """Config host binds are kept, reusable volumes from the previous
        container are merged in, and an affinity label targeting the old
        container is returned alongside the binds."""
        options = [
            VolumeSpec.parse(v, True) for v in [
                '/host/volume:/host/volume:ro',
                '/host/rw/volume:/host/rw/volume',
                '/new/volume',
                '/existing/volume',
                '/dev/tmpfs'
            ]
        ]

        self.mock_client.inspect_image.return_value = {
            'ContainerConfig': {'Volumes': {}}
        }

        previous_container = Container(self.mock_client, {
            'Id': 'cdefab',
            'Image': 'ababab',
            'Mounts': [{
                'Source': '/var/lib/docker/aaaaaaaa',
                'Destination': '/existing/volume',
                'Mode': '',
                'RW': True,
                'Name': 'existingvolume',
            }],
        }, has_been_inspected=True)

        expected = [
            '/host/volume:/host/volume:ro',
            '/host/rw/volume:/host/rw/volume:rw',
            'existingvolume:/existing/volume:rw',
        ]

        binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container)
        assert sorted(binds) == sorted(expected)
        # The affinity entry references the previous container's id.
        assert affinity == {'affinity:container': '=cdefab'}

    def test_mount_same_host_path_to_two_volumes(self):
        """A single host path may be bound to two container paths at once;
        both binds must appear in the create_host_config call."""
        service = Service(
            'web',
            image='busybox',
            volumes=[
                VolumeSpec.parse('/host/path:/data1', True),
                VolumeSpec.parse('/host/path:/data2', True),
            ],
            client=self.mock_client,
        )

        self.mock_client.inspect_image.return_value = {
            'Id': 'ababab',
            'ContainerConfig': {
                'Volumes': {}
            }
        }

        service._get_container_create_options(
            override_options={},
            number=1,
        )

        self.assertEqual(
            set(self.mock_client.create_host_config.call_args[1]['binds']),
            set([
                '/host/path:/data1:rw',
                '/host/path:/data2:rw',
            ]),
        )

    def test_get_container_create_options_with_different_host_path_in_container_json(self):
        """When the previous container's inspected host path differs from the
        configured one, the named volume recorded in the container JSON wins."""
        service = Service(
            'web',
            image='busybox',
            volumes=[VolumeSpec.parse('/host/path:/data')],
            client=self.mock_client,
        )
        volume_name = 'abcdefff1234'

        self.mock_client.inspect_image.return_value = {
            'Id': 'ababab',
            'ContainerConfig': {
                'Volumes': {
                    '/data': {},
                }
            }
        }

        self.mock_client.inspect_container.return_value = {
            'Id': '123123123',
            'Image': 'ababab',
            'Mounts': [
                {
                    'Destination': '/data',
                    # Differs from the configured '/host/path' on purpose.
                    'Source': '/mnt/sda1/host/path',
                    'Mode': '',
                    'RW': True,
                    'Driver': 'local',
                    'Name': volume_name,
                },
            ]
        }

        service._get_container_create_options(
            override_options={},
            number=1,
            previous_container=Container(self.mock_client, {'Id': '123123123'}),
        )

        assert (
            self.mock_client.create_host_config.call_args[1]['binds'] ==
            ['{}:/data:rw'.format(volume_name)]
        )

    def test_warn_on_masked_volume_no_warning_when_no_container_volumes(self):
        """No warning is emitted when the container has no volumes to mask."""
        volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
        container_volumes = []
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        assert not mock_log.warn.called

    def test_warn_on_masked_volume_when_masked(self):
        """A config bind sharing a destination with a container volume from a
        different source masks it and must produce exactly one warning."""
        volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
        container_volumes = [
            VolumeSpec('/var/lib/docker/path', '/path', 'rw'),
            VolumeSpec('/var/lib/docker/path', '/other', 'rw'),
        ]
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        mock_log.warn.assert_called_once_with(mock.ANY)

    def test_warn_on_masked_no_warning_with_same_path(self):
        """No warning when source and destination match exactly."""
        volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
        container_volumes = [VolumeSpec('/home/user', '/path', 'rw')]
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        assert not mock_log.warn.called

    def test_warn_on_masked_no_warning_with_container_only_option(self):
        """No warning for container-only volumes (no external source set)."""
        volumes_option = [VolumeSpec(None, '/path', 'rw')]
        container_volumes = [
            VolumeSpec('/var/lib/docker/volume/path', '/path', 'rw')
        ]
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        assert not mock_log.warn.called

    def test_create_with_special_volume_mode(self):
        """A non-standard mode flag ('z') is passed through to the bind
        string unchanged."""
        self.mock_client.inspect_image.return_value = {'Id': 'imageid'}

        self.mock_client.create_container.return_value = {'Id': 'containerid'}

        volume = '/tmp:/foo:z'
        Service(
            'web',
            client=self.mock_client,
            image='busybox',
            volumes=[VolumeSpec.parse(volume, True)],
        ).create_container()

        assert self.mock_client.create_container.call_count == 1
        self.assertEqual(
            self.mock_client.create_host_config.call_args[1]['binds'],
            [volume])
+
+
class ServiceSecretTest(unittest.TestCase):
    """Tests for Service.get_secret_volumes target-path resolution."""

    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)

    def _make_service(self, secret):
        # Minimal service carrying exactly one secret.
        return Service(
            'web',
            client=self.mock_client,
            image='busybox',
            secrets=[secret],
        )

    def test_get_secret_volumes(self):
        """A relative target is mounted under SECRETS_PATH."""
        secret = {
            'secret': ServiceSecret.parse({'source': 'secret1', 'target': 'b.txt'}),
            'file': 'a.txt'
        }
        vol = self._make_service(secret).get_secret_volumes()[0]

        assert vol.external == secret['file']
        assert vol.internal == '{}/{}'.format(SECRETS_PATH, secret['secret'].target)

    def test_get_secret_volumes_abspath(self):
        """An absolute target is used verbatim as the mount point."""
        secret = {
            'secret': ServiceSecret.parse({'source': 'secret1', 'target': '/d.txt'}),
            'file': 'c.txt'
        }
        vol = self._make_service(secret).get_secret_volumes()[0]

        assert vol.external == secret['file']
        assert vol.internal == secret['secret'].target

    def test_get_secret_volumes_no_target(self):
        """Without a target, the secret's source name is mounted under
        SECRETS_PATH."""
        secret = {
            'secret': ServiceSecret.parse({'source': 'secret1'}),
            'file': 'c.txt'
        }
        vol = self._make_service(secret).get_secret_volumes()[0]

        assert vol.external == secret['file']
        assert vol.internal == '{}/{}'.format(SECRETS_PATH, secret['secret'].source)
diff --git a/tests/unit/split_buffer_test.py b/tests/unit/split_buffer_test.py
new file mode 100644
index 00000000..c41ea27d
--- /dev/null
+++ b/tests/unit/split_buffer_test.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .. import unittest
+from compose.utils import split_buffer
+
+
class SplitBufferTest(unittest.TestCase):
    """Tests for compose.utils.split_buffer, which re-chunks a stream of
    bytes into decoded, newline-terminated text lines."""

    def test_single_line_chunks(self):
        """Chunks that are already whole lines pass through unchanged."""
        def reader():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi\n'

        self.assert_produces(reader, ['abc\n', 'def\n', 'ghi\n'])

    def test_no_end_separator(self):
        """A trailing partial line (no final newline) is still emitted."""
        def reader():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi'

        self.assert_produces(reader, ['abc\n', 'def\n', 'ghi'])

    def test_multiple_line_chunk(self):
        """A single chunk containing several lines is split apart."""
        def reader():
            yield b'abc\ndef\nghi'

        self.assert_produces(reader, ['abc\n', 'def\n', 'ghi'])

    def test_chunked_line(self):
        """A line spread across many small chunks is reassembled."""
        def reader():
            yield b'a'
            yield b'b'
            yield b'c'
            yield b'\n'
            yield b'd'

        self.assert_produces(reader, ['abc\n', 'd'])

    def test_preserves_unicode_sequences_within_lines(self):
        """Multi-byte utf-8 sequences survive splitting intact."""
        string = u"a\u2022c\n"

        def reader():
            yield string.encode('utf-8')

        self.assert_produces(reader, [string])

    def assert_produces(self, reader, expectations):
        """Assert split_buffer(reader()) yields exactly `expectations`,
        matching both the type and the value of every item.

        Bug fix: the previous implementation zipped the generator directly
        against the expectations, so the test silently PASSED when
        split_buffer produced fewer items than expected (zip truncates to
        the shorter input). Materialize the output and compare lengths
        explicitly before comparing items.
        """
        split = list(split_buffer(reader()))

        self.assertEqual(len(split), len(expectations))

        for (actual, expected) in zip(split, expectations):
            self.assertEqual(type(actual), type(expected))
            self.assertEqual(actual, expected)
diff --git a/tests/unit/timeparse_test.py b/tests/unit/timeparse_test.py
new file mode 100644
index 00000000..9915932c
--- /dev/null
+++ b/tests/unit/timeparse_test.py
@@ -0,0 +1,56 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import timeparse
+
+
def test_milli():
    """'5ms' parses to 0.005 seconds."""
    parsed = timeparse.timeparse('5ms')
    assert parsed == 0.005
+
+
def test_milli_float():
    """Fractional millisecond values are supported."""
    parsed = timeparse.timeparse('50.5ms')
    assert parsed == 0.0505
+
+
def test_second_milli():
    """Seconds and milliseconds combine additively."""
    parsed = timeparse.timeparse('200s5ms')
    assert parsed == 200.005
+
+
def test_second_milli_micro():
    """Seconds, milliseconds, and microseconds all combine."""
    parsed = timeparse.timeparse('200s5ms10us')
    assert parsed == 200.00501
+
+
def test_second():
    """A plain seconds value parses to itself."""
    parsed = timeparse.timeparse('200s')
    assert parsed == 200
+
+
def test_second_as_float():
    """Fractional seconds are supported."""
    parsed = timeparse.timeparse('20.5s')
    assert parsed == 20.5
+
+
def test_minute():
    """Minutes convert to seconds (32 * 60)."""
    parsed = timeparse.timeparse('32m')
    assert parsed == 1920
+
+
def test_hour_minute():
    """Hours and minutes combine (2*3600 + 32*60)."""
    parsed = timeparse.timeparse('2h32m')
    assert parsed == 9120
+
+
def test_minute_as_float():
    """Fractional minutes are supported (1.5 * 60)."""
    parsed = timeparse.timeparse('1.5m')
    assert parsed == 90
+
+
def test_hour_minute_second():
    """Hours, minutes, and seconds combine (5*3600 + 34*60 + 56)."""
    parsed = timeparse.timeparse('5h34m56s')
    assert parsed == 20096
+
+
def test_invalid_with_space():
    """Whitespace between components is rejected (returns None)."""
    parsed = timeparse.timeparse('5h 34m 56s')
    assert parsed is None
+
+
def test_invalid_with_comma():
    """Comma separators are rejected (returns None)."""
    parsed = timeparse.timeparse('5h,34m,56s')
    assert parsed is None
+
+
def test_invalid_with_empty_string():
    """The empty string is rejected (returns None)."""
    parsed = timeparse.timeparse('')
    assert parsed is None
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
new file mode 100644
index 00000000..84becb97
--- /dev/null
+++ b/tests/unit/utils_test.py
@@ -0,0 +1,70 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import utils
+
+
class TestJsonSplitter(object):
    """Tests for utils.json_splitter."""

    def test_json_splitter_no_object(self):
        """An incomplete JSON document yields None."""
        assert utils.json_splitter('{"foo": "bar') is None

    def test_json_splitter_with_object(self):
        """A complete leading object is parsed; the remainder is returned."""
        result = utils.json_splitter('{"foo": "bar"}\n \n{"next": "obj"}')
        assert result == ({'foo': 'bar'}, '{"next": "obj"}')

    def test_json_splitter_leading_whitespace(self):
        """Whitespace before the first object is skipped."""
        result = utils.json_splitter('\n \r{"foo": "bar"}\n\n   {"next": "obj"}')
        assert result == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
class TestStreamAsText(object):
    """Tests for utils.stream_as_text."""

    def test_stream_with_non_utf_unicode_character(self):
        """Invalid utf-8 bytes are replaced instead of raising."""
        chunks = list(utils.stream_as_text([b'\xed\xf3\xf3']))
        assert chunks == ['���']

    def test_stream_with_utf_character(self):
        """Valid utf-8 input decodes back to the original text."""
        chunks = list(utils.stream_as_text(['ěĝ'.encode('utf-8')]))
        assert chunks == ['ěĝ']
+
+
class TestJsonStream(object):
    """Tests for utils.json_stream."""

    def test_with_falsy_entries(self):
        """Empty objects and arrays are emitted, not silently dropped."""
        stream = [
            '{"one": "two"}\n{}\n',
            "[1, 2, 3]\n[]\n",
        ]
        expected = [
            {'one': 'two'},
            {},
            [1, 2, 3],
            [],
        ]
        assert list(utils.json_stream(stream)) == expected

    def test_with_leading_whitespace(self):
        """Whitespace around and between documents is skipped."""
        stream = [
            '\n \r\n {"one": "two"}{"x": 1}',
            ' {"three": "four"}\t\t{"x": 2}'
        ]
        expected = [
            {'one': 'two'},
            {'x': 1},
            {'three': 'four'},
            {'x': 2},
        ]
        assert list(utils.json_stream(stream)) == expected
+
+
class TestParseBytes(object):
    """Tests for utils.parse_bytes."""

    def test_parse_bytes(self):
        # Integers pass straight through.
        assert utils.parse_bytes(123) == 123
        # Numeric strings are converted.
        assert utils.parse_bytes('123') == 123
        # Unit suffixes scale the value (kb -> 1024 bytes).
        assert utils.parse_bytes('123kb') == 123 * 1024
        # Unparseable input yields None.
        assert utils.parse_bytes('foobar') is None
diff --git a/tests/unit/volume_test.py b/tests/unit/volume_test.py
new file mode 100644
index 00000000..457d8558
--- /dev/null
+++ b/tests/unit/volume_test.py
@@ -0,0 +1,26 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+import pytest
+
+from compose import volume
+from tests import mock
+
+
@pytest.fixture
def mock_client():
    """Autospecced docker APIClient for volume tests."""
    client = mock.create_autospec(docker.APIClient)
    return client
+
+
class TestVolume(object):
    """Tests for compose.volume.Volume.remove."""

    def test_remove_local_volume(self, mock_client):
        """Removing a project-local volume calls the API with the full
        '<name>_<project>' volume name."""
        volume.Volume(mock_client, 'foo', 'project').remove()
        mock_client.remove_volume.assert_called_once_with('foo_project')

    def test_remove_external_volume(self, mock_client):
        """External volumes are never removed through the API."""
        volume.Volume(mock_client, 'foo', 'project', external=True).remove()
        assert not mock_client.remove_volume.called