author     Felipe Sateler <fsateler@debian.org>  2017-11-19 18:27:48 -0300
committer  Felipe Sateler <fsateler@debian.org>  2017-11-19 18:27:48 -0300
commit     a997ae5b1840f2878b16443bd8e3c784d23ba9ac (patch)
tree       35a232d0ad24ed27ccc2ad4dfa45f7c7496f55b3
Import docker-compose_1.17.1.orig.tar.gz
[dgit import orig docker-compose_1.17.1.orig.tar.gz]
-rw-r--r--  .dockerignore  11
-rw-r--r--  .gitignore  14
-rw-r--r--  .pre-commit-config.yaml  25
-rw-r--r--  .travis.yml  29
-rw-r--r--  CHANGELOG.md  1591
l---------  CHANGES.md  1
-rw-r--r--  CONTRIBUTING.md  74
-rw-r--r--  Dockerfile  84
-rw-r--r--  Dockerfile.armhf  71
-rw-r--r--  Dockerfile.run  14
-rw-r--r--  Dockerfile.s390x  15
-rw-r--r--  Jenkinsfile  64
-rw-r--r--  LICENSE  191
-rw-r--r--  MAINTAINERS  52
-rw-r--r--  MANIFEST.in  15
-rw-r--r--  README.md  65
-rw-r--r--  ROADMAP.md  32
-rw-r--r--  SWARM.md  1
-rw-r--r--  appveyor.yml  24
-rwxr-xr-x  bin/docker-compose  6
-rw-r--r--  compose/__init__.py  4
-rw-r--r--  compose/__main__.py  6
-rw-r--r--  compose/bundle.py  258
-rw-r--r--  compose/cli/__init__.py  49
-rw-r--r--  compose/cli/colors.py  49
-rw-r--r--  compose/cli/command.py  120
-rw-r--r--  compose/cli/docker_client.py  95
-rw-r--r--  compose/cli/docopt_command.py  59
-rw-r--r--  compose/cli/errors.py  162
-rw-r--r--  compose/cli/formatter.py  51
-rw-r--r--  compose/cli/log_printer.py  250
-rw-r--r--  compose/cli/main.py  1297
-rw-r--r--  compose/cli/signals.py  30
-rw-r--r--  compose/cli/utils.py  150
-rw-r--r--  compose/cli/verbose_proxy.py  60
-rw-r--r--  compose/config/__init__.py  12
-rw-r--r--  compose/config/config.py  1306
-rw-r--r--  compose/config/config_schema_v1.json  188
-rw-r--r--  compose/config/config_schema_v2.0.json  389
-rw-r--r--  compose/config/config_schema_v2.1.json  441
-rw-r--r--  compose/config/config_schema_v2.2.json  448
-rw-r--r--  compose/config/config_schema_v2.3.json  451
-rw-r--r--  compose/config/config_schema_v3.0.json  384
-rw-r--r--  compose/config/config_schema_v3.1.json  429
-rw-r--r--  compose/config/config_schema_v3.2.json  476
-rw-r--r--  compose/config/config_schema_v3.3.json  535
-rw-r--r--  compose/config/config_schema_v3.4.json  544
-rw-r--r--  compose/config/config_schema_v3.5.json  542
-rw-r--r--  compose/config/environment.py  120
-rw-r--r--  compose/config/errors.py  55
-rw-r--r--  compose/config/interpolation.py  102
-rw-r--r--  compose/config/serialize.py  145
-rw-r--r--  compose/config/sort_services.py  73
-rw-r--r--  compose/config/types.py  351
-rw-r--r--  compose/config/validation.py  467
-rw-r--r--  compose/const.py  63
-rw-r--r--  compose/container.py  276
-rw-r--r--  compose/errors.py  33
-rw-r--r--  compose/network.py  286
-rw-r--r--  compose/parallel.py  298
-rw-r--r--  compose/progress_stream.py  111
-rw-r--r--  compose/project.py  674
-rw-r--r--  compose/service.py  1428
-rw-r--r--  compose/state.py  0
-rw-r--r--  compose/timeparse.py  96
-rw-r--r--  compose/utils.py  145
-rw-r--r--  compose/version.py  10
-rw-r--r--  compose/volume.py  149
-rw-r--r--  contrib/completion/bash/docker-compose  629
-rw-r--r--  contrib/completion/fish/docker-compose.fish  24
-rw-r--r--  contrib/completion/zsh/_docker-compose  474
-rwxr-xr-x  contrib/migration/migrate-compose-file-v1-to-v2.py  173
-rw-r--r--  docker-compose.spec  81
-rw-r--r--  docs/README.md  16
-rw-r--r--  experimental/compose_swarm_networking.md  5
-rw-r--r--  logo.png  bin 0 -> 39135 bytes
-rw-r--r--  project/ISSUE-TRIAGE.md  35
-rw-r--r--  project/RELEASE-PROCESS.md  148
-rw-r--r--  requirements-build.txt  1
-rw-r--r--  requirements-dev.txt  5
-rw-r--r--  requirements.txt  22
-rwxr-xr-x  script/build/image  17
-rwxr-xr-x  script/build/linux  13
-rwxr-xr-x  script/build/linux-entrypoint  15
-rwxr-xr-x  script/build/osx  15
-rwxr-xr-x  script/build/test-image  17
-rw-r--r--  script/build/windows.ps1  60
-rwxr-xr-x  script/build/write-git-sha  7
-rwxr-xr-x  script/ci  8
-rwxr-xr-x  script/clean  7
-rwxr-xr-x  script/release/build-binaries  40
-rwxr-xr-x  script/release/cherry-pick-pr  34
-rwxr-xr-x  script/release/contributors  30
-rwxr-xr-x  script/release/download-binaries  32
-rwxr-xr-x  script/release/make-branch  86
-rwxr-xr-x  script/release/push-release  82
-rwxr-xr-x  script/release/rebase-bump-commit  38
-rw-r--r--  script/release/utils.sh  23
-rw-r--r--  script/run/run.ps1  22
-rwxr-xr-x  script/run/run.sh  57
-rwxr-xr-x  script/setup/osx  52
-rwxr-xr-x  script/test/all  64
-rwxr-xr-x  script/test/ci  25
-rwxr-xr-x  script/test/default  19
-rwxr-xr-x  script/test/versions.py  162
-rw-r--r--  script/travis/bintray.json.tmpl  29
-rwxr-xr-x  script/travis/build-binary  13
-rwxr-xr-x  script/travis/ci  10
-rwxr-xr-x  script/travis/install  10
-rwxr-xr-x  script/travis/render-bintray-config.py  13
-rw-r--r--  setup.cfg  2
-rw-r--r--  setup.py  103
-rw-r--r--  tests/__init__.py  14
-rw-r--r--  tests/acceptance/__init__.py  0
-rw-r--r--  tests/acceptance/cli_test.py  2390
-rw-r--r--  tests/fixtures/UpperCaseDir/docker-compose.yml  6
-rw-r--r--  tests/fixtures/abort-on-container-exit-0/docker-compose.yml  6
-rw-r--r--  tests/fixtures/abort-on-container-exit-1/docker-compose.yml  6
-rw-r--r--  tests/fixtures/build-ctx/Dockerfile  3
-rw-r--r--  tests/fixtures/build-path-override-dir/docker-compose.yml  2
-rw-r--r--  tests/fixtures/build-path/docker-compose.yml  2
-rw-r--r--  tests/fixtures/build-shm-size/Dockerfile  4
-rw-r--r--  tests/fixtures/build-shm-size/docker-compose.yml  7
-rw-r--r--  tests/fixtures/bundle-with-digests/docker-compose.yml  9
-rw-r--r--  tests/fixtures/commands-composefile/docker-compose.yml  5
-rw-r--r--  tests/fixtures/default-env-file/.env  4
-rw-r--r--  tests/fixtures/default-env-file/docker-compose.yml  6
-rw-r--r--  tests/fixtures/dockerfile-with-volume/Dockerfile  4
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml  3
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml  3
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.yml  10
-rw-r--r--  tests/fixtures/echo-services/docker-compose.yml  6
-rw-r--r--  tests/fixtures/entrypoint-composefile/docker-compose.yml  6
-rw-r--r--  tests/fixtures/entrypoint-dockerfile/Dockerfile  4
-rw-r--r--  tests/fixtures/entrypoint-dockerfile/docker-compose.yml  4
-rw-r--r--  tests/fixtures/env-file/docker-compose.yml  4
-rw-r--r--  tests/fixtures/env-file/test.env  1
-rw-r--r--  tests/fixtures/env/one.env  11
-rw-r--r--  tests/fixtures/env/resolve.env  4
-rw-r--r--  tests/fixtures/env/two.env  2
-rw-r--r--  tests/fixtures/environment-composefile/docker-compose.yml  7
-rw-r--r--  tests/fixtures/environment-interpolation/docker-compose.yml  17
-rw-r--r--  tests/fixtures/exit-code-from/docker-compose.yml  6
-rw-r--r--  tests/fixtures/expose-composefile/docker-compose.yml  11
-rw-r--r--  tests/fixtures/extends/circle-1.yml  12
-rw-r--r--  tests/fixtures/extends/circle-2.yml  12
-rw-r--r--  tests/fixtures/extends/common-env-labels-ulimits.yml  13
-rw-r--r--  tests/fixtures/extends/common.yml  7
-rw-r--r--  tests/fixtures/extends/docker-compose.yml  17
-rw-r--r--  tests/fixtures/extends/healthcheck-1.yml  9
-rw-r--r--  tests/fixtures/extends/healthcheck-2.yml  6
-rw-r--r--  tests/fixtures/extends/invalid-links.yml  11
-rw-r--r--  tests/fixtures/extends/invalid-net-v2.yml  12
-rw-r--r--  tests/fixtures/extends/invalid-net.yml  8
-rw-r--r--  tests/fixtures/extends/invalid-volumes.yml  9
-rw-r--r--  tests/fixtures/extends/nested-intermediate.yml  6
-rw-r--r--  tests/fixtures/extends/nested.yml  6
-rw-r--r--  tests/fixtures/extends/no-file-specified.yml  9
-rw-r--r--  tests/fixtures/extends/nonexistent-path-base.yml  6
-rw-r--r--  tests/fixtures/extends/nonexistent-path-child.yml  8
-rw-r--r--  tests/fixtures/extends/nonexistent-service.yml  4
-rw-r--r--  tests/fixtures/extends/service-with-invalid-schema.yml  4
-rw-r--r--  tests/fixtures/extends/service-with-valid-composite-extends.yml  5
-rw-r--r--  tests/fixtures/extends/specify-file-as-self.yml  17
-rw-r--r--  tests/fixtures/extends/valid-common-config.yml  6
-rw-r--r--  tests/fixtures/extends/valid-common.yml  3
-rw-r--r--  tests/fixtures/extends/valid-composite-extends.yml  2
-rw-r--r--  tests/fixtures/extends/valid-interpolation-2.yml  3
-rw-r--r--  tests/fixtures/extends/valid-interpolation.yml  5
-rw-r--r--  tests/fixtures/extends/verbose-and-shorthand.yml  15
-rw-r--r--  tests/fixtures/healthcheck/docker-compose.yml  24
-rw-r--r--  tests/fixtures/invalid-composefile/invalid.yml  5
-rw-r--r--  tests/fixtures/links-composefile/docker-compose.yml  11
-rw-r--r--  tests/fixtures/logging-composefile-legacy/docker-compose.yml  10
-rw-r--r--  tests/fixtures/logging-composefile/docker-compose.yml  14
-rw-r--r--  tests/fixtures/logs-composefile/docker-compose.yml  6
-rw-r--r--  tests/fixtures/logs-tail-composefile/docker-compose.yml  3
-rw-r--r--  tests/fixtures/longer-filename-composefile/docker-compose.yaml  3
-rw-r--r--  tests/fixtures/multiple-composefiles/compose2.yml  3
-rw-r--r--  tests/fixtures/multiple-composefiles/docker-compose.yml  6
-rw-r--r--  tests/fixtures/net-container/docker-compose.yml  7
-rw-r--r--  tests/fixtures/net-container/v2-invalid.yml  10
-rw-r--r--  tests/fixtures/networks/bridge.yml  9
-rw-r--r--  tests/fixtures/networks/default-network-config.yml  13
-rw-r--r--  tests/fixtures/networks/docker-compose.yml  21
-rw-r--r--  tests/fixtures/networks/external-default.yml  12
-rw-r--r--  tests/fixtures/networks/external-networks.yml  16
-rw-r--r--  tests/fixtures/networks/missing-network.yml  10
-rw-r--r--  tests/fixtures/networks/network-aliases.yml  16
-rwxr-xr-x  tests/fixtures/networks/network-internal.yml  13
-rw-r--r--  tests/fixtures/networks/network-label.yml  13
-rw-r--r--  tests/fixtures/networks/network-mode.yml  27
-rwxr-xr-x  tests/fixtures/networks/network-static-addresses.yml  23
-rw-r--r--  tests/fixtures/no-composefile/.gitignore  0
-rw-r--r--  tests/fixtures/no-links-composefile/docker-compose.yml  9
-rw-r--r--  tests/fixtures/no-services/docker-compose.yml  5
-rw-r--r--  tests/fixtures/override-files/docker-compose.override.yml  7
-rw-r--r--  tests/fixtures/override-files/docker-compose.yml  10
-rw-r--r--  tests/fixtures/override-files/extra.yml  10
-rw-r--r--  tests/fixtures/override-yaml-files/docker-compose.override.yaml  3
-rw-r--r--  tests/fixtures/override-yaml-files/docker-compose.yml  10
-rw-r--r--  tests/fixtures/pid-mode/docker-compose.yml  17
-rw-r--r--  tests/fixtures/ports-composefile-scale/docker-compose.yml  6
-rw-r--r--  tests/fixtures/ports-composefile/docker-compose.yml  8
-rw-r--r--  tests/fixtures/ports-composefile/expanded-notation.yml  15
-rw-r--r--  tests/fixtures/restart/docker-compose.yml  17
-rw-r--r--  tests/fixtures/run-workdir/docker-compose.yml  4
-rw-r--r--  tests/fixtures/scale/docker-compose.yml  9
-rw-r--r--  tests/fixtures/secrets/default  1
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml  9
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/docker-compose.yml  2
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/files/example.txt  1
-rw-r--r--  tests/fixtures/simple-composefile/digest.yml  6
-rw-r--r--  tests/fixtures/simple-composefile/docker-compose.yml  6
-rw-r--r--  tests/fixtures/simple-composefile/ignore-pull-failures.yml  6
-rw-r--r--  tests/fixtures/simple-dockerfile/Dockerfile  3
-rw-r--r--  tests/fixtures/simple-dockerfile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/simple-failing-dockerfile/Dockerfile  7
-rw-r--r--  tests/fixtures/simple-failing-dockerfile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/sleeps-composefile/docker-compose.yml  10
-rw-r--r--  tests/fixtures/stop-signal-composefile/docker-compose.yml  10
-rw-r--r--  tests/fixtures/tls/ca.pem  0
-rw-r--r--  tests/fixtures/tls/cert.pem  0
-rw-r--r--  tests/fixtures/tls/key.key  0
-rw-r--r--  tests/fixtures/top/docker-compose.yml  6
-rw-r--r--  tests/fixtures/unicode-environment/docker-compose.yml  7
-rw-r--r--  tests/fixtures/user-composefile/docker-compose.yml  4
-rw-r--r--  tests/fixtures/v1-config/docker-compose.yml  10
-rw-r--r--  tests/fixtures/v2-dependencies/docker-compose.yml  13
-rw-r--r--  tests/fixtures/v2-full/Dockerfile  4
-rw-r--r--  tests/fixtures/v2-full/docker-compose.yml  24
-rw-r--r--  tests/fixtures/v2-simple/docker-compose.yml  8
-rw-r--r--  tests/fixtures/v2-simple/links-invalid.yml  10
-rw-r--r--  tests/fixtures/v3-full/docker-compose.yml  57
-rw-r--r--  tests/fixtures/volume-path-interpolation/docker-compose.yml  5
-rw-r--r--  tests/fixtures/volume-path/common/services.yml  5
-rw-r--r--  tests/fixtures/volume-path/docker-compose.yml  6
-rw-r--r--  tests/fixtures/volume/docker-compose.yml  11
-rw-r--r--  tests/fixtures/volumes-from-container/docker-compose.yml  5
-rw-r--r--  tests/fixtures/volumes/docker-compose.yml  2
-rw-r--r--  tests/fixtures/volumes/external-volumes-v2-x.yml  17
-rw-r--r--  tests/fixtures/volumes/external-volumes-v2.yml  16
-rw-r--r--  tests/fixtures/volumes/external-volumes-v3-4.yml  17
-rw-r--r--  tests/fixtures/volumes/external-volumes-v3-x.yml  16
-rw-r--r--  tests/fixtures/volumes/volume-label.yml  13
-rw-r--r--  tests/helpers.py  50
-rw-r--r--  tests/integration/__init__.py  0
-rw-r--r--  tests/integration/network_test.py  17
-rw-r--r--  tests/integration/project_test.py  1636
-rw-r--r--  tests/integration/resilience_test.py  57
-rw-r--r--  tests/integration/service_test.py  1380
-rw-r--r--  tests/integration/state_test.py  308
-rw-r--r--  tests/integration/testcases.py  187
-rw-r--r--  tests/integration/volume_test.py  126
-rw-r--r--  tests/unit/__init__.py  0
-rw-r--r--  tests/unit/bundle_test.py  222
-rw-r--r--  tests/unit/cli/__init__.py  0
-rw-r--r--  tests/unit/cli/command_test.py  76
-rw-r--r--  tests/unit/cli/docker_client_test.py  187
-rw-r--r--  tests/unit/cli/errors_test.py  88
-rw-r--r--  tests/unit/cli/formatter_test.py  53
-rw-r--r--  tests/unit/cli/log_printer_test.py  201
-rw-r--r--  tests/unit/cli/main_test.py  104
-rw-r--r--  tests/unit/cli/utils_test.py  23
-rw-r--r--  tests/unit/cli/verbose_proxy_test.py  33
-rw-r--r--  tests/unit/cli_test.py  214
-rw-r--r--  tests/unit/config/__init__.py  0
-rw-r--r--  tests/unit/config/config_test.py  4482
-rw-r--r--  tests/unit/config/environment_test.py  40
-rw-r--r--  tests/unit/config/interpolation_test.py  148
-rw-r--r--  tests/unit/config/sort_services_test.py  243
-rw-r--r--  tests/unit/config/types_test.py  235
-rw-r--r--  tests/unit/container_test.py  198
-rw-r--r--  tests/unit/network_test.py  161
-rw-r--r--  tests/unit/parallel_test.py  163
-rw-r--r--  tests/unit/progress_stream_test.py  87
-rw-r--r--  tests/unit/project_test.py  570
-rw-r--r--  tests/unit/service_test.py  1146
-rw-r--r--  tests/unit/split_buffer_test.py  54
-rw-r--r--  tests/unit/timeparse_test.py  56
-rw-r--r--  tests/unit/utils_test.py  70
-rw-r--r--  tests/unit/volume_test.py  26
-rw-r--r--  tox.ini  54
283 files changed, 34967 insertions, 0 deletions
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..eccd86dd
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,11 @@
+*.egg-info
+.coverage
+.git
+.tox
+build
+coverage-html
+docs/_site
+venv
+.tox
+**/__pycache__
+*.pyc
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..ef04ca15
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+*.egg-info
+*.pyc
+.coverage*
+/.tox
+/build
+/coverage-html
+/dist
+/docs/_site
+/venv
+README.rst
+compose/GITSHA
+*.swo
+*.swp
+.DS_Store
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..b7bcc846
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,25 @@
+- repo: git://github.com/pre-commit/pre-commit-hooks
+ sha: 'v0.9.1'
+ hooks:
+ - id: check-added-large-files
+ - id: check-docstring-first
+ - id: check-merge-conflict
+ - id: check-yaml
+ - id: check-json
+ - id: debug-statements
+ - id: end-of-file-fixer
+ - id: flake8
+ - id: name-tests-test
+ exclude: 'tests/(integration/testcases\.py|helpers\.py)'
+ - id: requirements-txt-fixer
+ - id: trailing-whitespace
+- repo: git://github.com/asottile/reorder_python_imports
+ sha: v0.3.5
+ hooks:
+ - id: reorder-python-imports
+ language_version: 'python2.7'
+ args:
+ - --add-import
+ - from __future__ import absolute_import
+ - --add-import
+ - from __future__ import unicode_literals
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..fbf26964
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,29 @@
+sudo: required
+
+language: python
+
+matrix:
+ include:
+ - os: linux
+ services:
+ - docker
+ - os: osx
+ language: generic
+
+install: ./script/travis/install
+
+script:
+ - ./script/travis/ci
+ - ./script/travis/build-binary
+
+before_deploy:
+ - "./script/travis/render-bintray-config.py < ./script/travis/bintray.json.tmpl > ./bintray.json"
+
+deploy:
+ provider: bintray
+ user: docker-compose-roleuser
+ key: '$BINTRAY_API_KEY'
+ file: ./bintray.json
+ skip_cleanup: true
+ on:
+ all_branches: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..d0be7ea7
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,1591 @@
+Change log
+==========
+
+1.17.1 (2017-11-08)
+------------------
+
+### Bugfixes
+
+- Fixed a bug that would prevent creating new containers when using
+ container labels in the list format as part of the service's definition.
+
+1.17.0 (2017-11-02)
+-------------------
+
+### New features
+
+#### Compose file version 3.4
+
+- Introduced version 3.4 of the `docker-compose.yml` specification.
+  This version must be used with Docker Engine 17.06.0 or above.
+
+- Added support for `cache_from`, `network` and `target` options in build
+ configurations
+
+- Added support for the `order` parameter in the `update_config` section
+
+- Added support for setting a custom name in volume definitions using
+ the `name` parameter
+
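Taken together, the v3.4 additions above might look like this minimal sketch (the service, image, and volume names are hypothetical):

    version: "3.4"
    services:
      web:
        build:
          context: .
          target: production        # stop at a named stage of a multi-stage build
          network: host             # network used for RUN instructions during the build
          cache_from:
            - example/web:latest    # existing images to consult as a layer cache
        deploy:
          update_config:
            order: start-first      # start replacement tasks before stopping old ones

    volumes:
      data:
        name: my-app-data           # explicit volume name instead of the project-prefixed default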
+#### Compose file version 2.3
+
+- Added support for `shm_size` option in build configuration
+
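A minimal sketch of the build-time `shm_size` option (the value shown is arbitrary):

    version: "2.3"
    services:
      app:
        build:
          context: .
          shm_size: 64M    # size of /dev/shm for containers used during the build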
+#### Compose file version 2.x
+
+- Added support for extension fields (`x-*`). Also available for v3.4 files
+
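Extension fields are top-level keys prefixed with `x-` that Compose parses but otherwise ignores, typically paired with YAML anchors to reuse configuration. A sketch with hypothetical names:

    version: "2.1"
    x-logging: &default-logging
      driver: json-file
      options:
        max-size: "10m"

    services:
      web:
        image: example/web
        logging: *default-logging    # reuse the block defined under x-logging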
+#### All formats
+
+- Added a new `--no-start` option to the `up` command, allowing users to
+  create all resources (networks, volumes, containers) without starting
+  services. The `create` command is deprecated in favor of this new option
+
+### Bugfixes
+
+- Fixed a bug where `extra_hosts` values would be overridden by extension
+  files instead of being merged together
+
+- Fixed a bug where the validation for v3.2 files would prevent using the
+ `consistency` field in service volume definitions
+
+- Fixed a bug that would cause a crash when configuration fields expecting
+ unique items would contain duplicates
+
+- Fixed a bug where mount overrides with a different mode would create a
+ duplicate entry instead of overriding the original entry
+
+- Fixed a bug where build labels declared as a list wouldn't be properly
+ parsed
+
+- Fixed a bug where the output of `docker-compose config` would be invalid
+ for some versions if the file contained custom-named external volumes
+
+- Improved error handling when issuing a build command on Windows using an
+ unsupported file version
+
+- Fixed an issue where networks with identical names would sometimes be
+ created when running `up` commands concurrently.
+
+1.16.1 (2017-09-01)
+-------------------
+
+### Bugfixes
+
+- Fixed bug that prevented using `extra_hosts` in several configuration files.
+
+1.16.0 (2017-08-31)
+-------------------
+
+### New features
+
+#### Compose file version 2.3
+
+- Introduced version 2.3 of the `docker-compose.yml` specification.
+  This version must be used with Docker Engine 17.06.0 or above.
+
+- Added support for the `target` parameter in build configurations
+
+- Added support for the `start_period` parameter in healthcheck
+ configurations
+
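A sketch combining both v2.3 additions (the image, stage name, and timings are hypothetical):

    version: "2.3"
    services:
      db:
        build:
          context: .
          target: runtime            # build only up to a named stage of a multi-stage Dockerfile
        healthcheck:
          test: ["CMD", "pg_isready"]
          interval: 30s
          timeout: 5s
          retries: 3
          start_period: 40s          # probe failures during start-up don't count against retries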
+#### Compose file version 2.x
+
+- Added support for the `blkio_config` parameter in service definitions
+
+- Added support for setting a custom name in volume definitions using
+ the `name` parameter (not available for version 2.0)
+
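A sketch of both options; the `blkio_config` sub-keys mirror the corresponding `docker run` block-IO flags, and the device path and rate here are hypothetical:

    version: "2.3"
    services:
      worker:
        image: example/worker
        blkio_config:
          weight: 300                # relative block-IO weight (10-1000)
          device_read_bps:
            - path: /dev/sda
              rate: "12mb"           # throttle reads from this device

    volumes:
      cache:
        name: shared-cache           # explicit name (not available in v2.0)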
+#### All formats
+
+- Added a new CLI flag, `--no-ansi`, to suppress ANSI control characters in
+ output
+
+### Bugfixes
+
+- Fixed a bug where nested `extends` instructions weren't resolved
+ properly, causing "file not found" errors
+
+- Fixed several issues with `.dockerignore` parsing
+
+- Fixed issues where logs of TTY-enabled services were being printed
+ incorrectly and causing `MemoryError` exceptions
+
+- Fixed a bug where printing application logs would sometimes be interrupted
+ by a `UnicodeEncodeError` exception on Python 3
+
+- The `$` character in the output of `docker-compose config` is now
+ properly escaped
+
+- Fixed a bug where running `docker-compose top` would sometimes fail
+ with an uncaught exception
+
+- Fixed a bug where `docker-compose pull` with the `--parallel` flag
+ would return a `0` exit code when failing
+
+- Fixed an issue where keys in `deploy.resources` were not being validated
+
+- Fixed an issue where the `logging` options in the output of
+ `docker-compose config` would be set to `null`, an invalid value
+
+- Fixed the output of the `docker-compose images` command when an image
+ would come from a private repository using an explicit port number
+
+- Fixed the output of `docker-compose config` when a port definition used
+ `0` as the value for the published port
+
+1.15.0 (2017-07-26)
+-------------------
+
+### New features
+
+#### Compose file version 2.2
+
+- Added support for the `network` parameter in build configurations.
+
+#### Compose file version 2.1 and up
+
+- The `pid` option in a service's definition now supports a `service:<name>`
+ value.
+
+- Added support for the `storage_opt` parameter in service definitions.
+ This option is not available for the v3 format
+
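A sketch of the two options above (the images and size value are hypothetical; valid `storage_opt` keys depend on the storage driver in use):

    version: "2.2"
    services:
      app:
        image: example/app
        pid: "service:db"    # share the PID namespace of the db service's container
      db:
        image: example/db
        storage_opt:
          size: "20G"        # driver-dependent option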
+#### All formats
+
+- Added `--quiet` flag to `docker-compose pull`, suppressing progress output
+
+- Some improvements to CLI output
+
+### Bugfixes
+
+- Volumes specified through the `--volume` flag of `docker-compose run` now
+  complement volumes declared in the service's definition instead of replacing
+ them
+
+- Fixed a bug where using multiple Compose files would unset the scale value
+ defined inside the Compose file.
+
+- Fixed an issue where the `credHelpers` entries in the `config.json` file
+ were not being honored by Compose
+
+- Fixed a bug where using multiple Compose files with port declarations
+ would cause failures in Python 3 environments
+
+- Fixed a bug where some proxy-related options present in the user's
+ environment would prevent Compose from running
+
+- Fixed an issue where the output of `docker-compose config` would be invalid
+ if the original file used `Y` or `N` values
+
+- Fixed an issue preventing `up` operations on a previously created stack on
+ Windows Engine.
+
+1.14.0 (2017-06-19)
+-------------------
+
+### New features
+
+#### Compose file version 3.3
+
+- Introduced version 3.3 of the `docker-compose.yml` specification.
+  This version must be used with Docker Engine 17.06.0 or above.
+ Note: the `credential_spec` and `configs` keys only apply to Swarm services
+ and will be ignored by Compose
+
+#### Compose file version 2.2
+
+- Added the following parameters in service definitions: `cpu_count`,
+ `cpu_percent`, `cpus`
+
+#### Compose file version 2.1
+
+- Added support for build labels. This feature is also available in the
+ 2.2 and 3.3 formats.
+
+#### All formats
+
+- Added shorthand `-u` for `--user` flag in `docker-compose exec`
+
+- Differences in labels between the Compose file and remote network
+ will now print a warning instead of preventing redeployment.
+
+### Bugfixes
+
+- Fixed a bug where a service's dependencies were being rescaled to their
+ default scale when running a `docker-compose run` command
+
+- Fixed a bug where `docker-compose rm` with the `--stop` flag was not
+ behaving properly when provided with a list of services to remove
+
+- Fixed a bug where `cache_from` in the build section would be ignored when
+ using more than one Compose file.
+
+- Fixed a bug that prevented binding the same port to different IPs when
+ using more than one Compose file.
+
+- Fixed a bug where override files would not be picked up by Compose if they
+ had the `.yaml` extension
+
+- Fixed a bug on Windows Engine where networks would be incorrectly flagged
+ for recreation
+
+- Fixed a bug where services declaring ports would cause crashes on some
+ versions of Python 3
+
+- Fixed a bug where the output of `docker-compose config` would sometimes
+ contain invalid port definitions
+
+1.13.0 (2017-05-02)
+-------------------
+
+### Breaking changes
+
+- `docker-compose up` now resets a service's scaling to its default value.
+ You can use the newly introduced `--scale` option to specify a custom
+ scale value
+
+### New features
+
+#### Compose file version 2.2
+
+- Introduced version 2.2 of the `docker-compose.yml` specification. This
+  version must be used with Docker Engine 1.13.0 or above
+
+- Added support for `init` in service definitions.
+
+- Added support for `scale` in service definitions. The configuration's value
+ can be overridden using the `--scale` flag in `docker-compose up`.
+ Please note that the `scale` command is disabled for this file format
+
+#### Compose file version 2.x
+
+- Added support for `options` in the `ipam` section of network definitions
+
+### Bugfixes
+
+- Fixed a bug where paths provided to Compose via the `-f` option were not
+ being resolved properly
+
+- Fixed a bug where the `ext_ip::target_port` notation in the ports section
+ was incorrectly marked as invalid
+
+- Fixed an issue where the `exec` command would sometimes not return control
+ to the terminal when using the `-d` flag
+
+- Fixed a bug where secrets were missing from the output of the `config`
+ command for v3.2 files
+
+- Fixed an issue where `docker-compose` would hang if no internet connection
+ was available
+
+- Fixed an issue where paths containing unicode characters passed via the `-f`
+ flag were causing Compose to crash
+
+- Fixed an issue where the output of `docker-compose config` would be invalid
+ if the Compose file contained external secrets
+
+- Fixed a bug where using `--exit-code-from` with `up` would fail if Compose
+ was installed in a Python 3 environment
+
+- Fixed a bug where recreating containers using a combination of `tmpfs` and
+ `volumes` would result in an invalid config state
+
+
+1.12.0 (2017-04-04)
+-------------------
+
+### New features
+
+#### Compose file version 3.2
+
+- Introduced version 3.2 of the `docker-compose.yml` specification
+
+- Added support for `cache_from` in the `build` section of services
+
+- Added support for the new expanded ports syntax in service definitions
+
+- Added support for the new expanded volumes syntax in service definitions
+
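A sketch of the two expanded syntaxes (the names and port numbers are hypothetical):

    version: "3.2"
    services:
      web:
        image: example/web
        ports:
          - target: 80           # container port
            published: 8080      # host port
            protocol: tcp
            mode: host
        volumes:
          - type: volume
            source: mydata
            target: /data
            read_only: true

    volumes:
      mydata: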
+#### Compose file version 2.1
+
+- Added support for `pids_limit` in service definitions
+
+#### Compose file version 2.0 and up
+
+- Added `--volumes` option to `docker-compose config` that lists named
+ volumes declared for that project
+
+- Added support for `mem_reservation` in service definitions (2.x only)
+
+- Added support for `dns_opt` in service definitions (2.x only)
+
+#### All formats
+
+- Added a new `docker-compose images` command that lists images used by
+ the current project's containers
+
+- Added a `--stop` (shorthand `-s`) option to `docker-compose rm` that stops
+ the running containers before removing them
+
+- Added a `--resolve-image-digests` option to `docker-compose config` that
+ pins the image version for each service to a permanent digest
+
+- Added a `--exit-code-from SERVICE` option to `docker-compose up`. When
+ used, `docker-compose` will exit on any container's exit with the code
+ corresponding to the specified service's exit code
+
+- Added a `--parallel` option to `docker-compose pull` that enables images
+ for multiple services to be pulled simultaneously
+
+- Added a `--build-arg` option to `docker-compose build`
+
+- Added a `--volume <volume_mapping>` (shorthand `-v`) option to
+ `docker-compose run` to declare runtime volumes to be mounted
+
+- Added a `--project-directory PATH` option to `docker-compose` that will
+ affect path resolution for the project
+
+- When using `--abort-on-container-exit` in `docker-compose up`, the exit
+ code for the container that caused the abort will be the exit code of
+ the `docker-compose up` command
+
+- Users can now configure which path separator character is used to separate
+  entries in the `COMPOSE_FILE` environment variable, using the
+  `COMPOSE_PATH_SEPARATOR` environment variable
+
+- Added support for port range to single port in port mappings
+ (e.g. `8000-8010:80`)
+
+### Bugfixes
+
+- `docker-compose run --rm` now removes anonymous volumes after execution,
+ matching the behavior of `docker run --rm`.
+
+- Fixed a bug where override files containing port lists would cause a
+ TypeError to be raised
+
+- Fixed a bug where the `deploy` key would be missing from the output of
+ `docker-compose config`
+
+- Fixed a bug where scaling services up or down would sometimes re-use
+ obsolete containers
+
+- Fixed a bug where the output of `docker-compose config` would be invalid
+ if the project declared anonymous volumes
+
+- Variable interpolation now properly occurs in the `secrets` section of
+ the Compose file
+
+- The `secrets` section now properly appears in the output of
+ `docker-compose config`
+
+- Fixed a bug where changes to some networks properties would not be
+ detected against previously created networks
+
+- Fixed a bug where `docker-compose` would crash when trying to write into
+ a closed pipe
+
+- Fixed an issue where Compose would not pick up on the value of
+ COMPOSE_TLS_VERSION when used in combination with command-line TLS flags
+
+1.11.2 (2017-02-17)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug that was preventing secrets configuration from being
+ loaded properly
+
+- Fixed a bug where the `docker-compose config` command would fail
+ if the config file contained secrets definitions
+
+- Fixed an issue where Compose on some Linux distributions would
+ pick up and load an outdated version of the requests library
+
+- Fixed an issue where socket-type files inside a build folder
+ would cause `docker-compose` to crash when trying to build that
+ service
+
+- Fixed an issue where recursive wildcard patterns `**` were not being
+ recognized in `.dockerignore` files.
+
+1.11.1 (2017-02-09)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug where the 3.1 file format was not being recognized as valid
+ by the Compose parser
+
+1.11.0 (2017-02-08)
+-------------------
+
+### New Features
+
+#### Compose file version 3.1
+
+- Introduced version 3.1 of the `docker-compose.yml` specification. This
+ version requires Docker Engine 1.13.0 or above. It introduces support
+ for secrets. See the documentation for more information
+
+#### Compose file version 2.0 and up
+
+- Introduced the `docker-compose top` command that displays processes running
+ for the different services managed by Compose.
+
+### Bugfixes
+
+- Fixed a bug where extending a service defining a healthcheck dictionary
+ would cause `docker-compose` to error out.
+
+- Fixed an issue where the `pid` entry in a service definition was being
+ ignored when using multiple Compose files.
+
+1.10.1 (2017-02-01)
+------------------
+
+### Bugfixes
+
+- Fixed an issue where presence of older versions of the docker-py
+ package would cause unexpected crashes while running Compose
+
+- Fixed an issue where healthcheck dependencies would be lost when
+ using multiple compose files for a project
+
+- Fixed a few issues that made the output of the `config` command
+ invalid
+
+- Fixed an issue where adding volume labels to v3 Compose files would
+ result in an error
+
+- Fixed an issue on Windows where build context paths containing unicode
+ characters were being improperly encoded
+
+- Fixed a bug where Compose would occasionally crash while streaming logs
+ when containers would stop or restart
+
+1.10.0 (2017-01-18)
+-------------------
+
+### New Features
+
+#### Compose file version 3.0
+
+- Introduced version 3.0 of the `docker-compose.yml` specification. This
+  version must be used with Docker Engine 1.13 or above and is
+ specifically designed to work with the `docker stack` commands.
+
+#### Compose file version 2.1 and up
+
+- Healthcheck configuration can now be done in the service definition using
+ the `healthcheck` parameter
+
+- Container dependencies can now be set up to wait on positive healthchecks
+  when declared using `depends_on`. See the documentation for the updated
+  syntax, and the sketch after this list.
+ **Note:** This feature will not be ported to version 3 Compose files.
+
+- Added support for the `sysctls` parameter in service definitions
+
+- Added support for the `userns_mode` parameter in service definitions
+
+- Compose now adds identifying labels to networks and volumes it creates
+
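A sketch of a healthcheck-gated dependency (the images and probe command are hypothetical):

    version: "2.1"
    services:
      web:
        image: example/web
        depends_on:
          db:
            condition: service_healthy    # wait until db's healthcheck reports healthy
      db:
        image: example/db
        healthcheck:
          test: ["CMD", "pg_isready", "-U", "postgres"]
          interval: 10s
          retries: 5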
+#### Compose file version 2.0 and up
+
+- Added support for the `stop_grace_period` option in service definitions.
+
+### Bugfixes
+
+- Colored output now works properly on Windows.
+
+- Fixed a bug where docker-compose run would fail to set up link aliases
+ in interactive mode on Windows.
+
+- Networks created by Compose are now always made attachable
+ (Compose files v2.1 and up).
+
+- Fixed a bug where falsy values of `COMPOSE_CONVERT_WINDOWS_PATHS`
+ (`0`, `false`, empty value) were being interpreted as true.
+
+- Fixed a bug where forward slashes in some .dockerignore patterns weren't
+ being parsed correctly on Windows
+
+
+1.9.0 (2016-11-16)
+-----------------
+
+**Breaking changes**
+
+- When using Compose with Docker Toolbox/Machine on Windows, volume paths are
+ no longer converted from `C:\Users` to `/c/Users`-style by default. To
+ re-enable this conversion so that your volumes keep working, set the
+ environment variable `COMPOSE_CONVERT_WINDOWS_PATHS=1`. Users of
+ Docker for Windows are not affected and do not need to set the variable.
+
+New Features
+
+- Interactive mode for `docker-compose run` and `docker-compose exec` is
+ now supported on Windows platforms. Please note that the `docker` binary
+ is required to be present on the system for this feature to work.
+
+- Introduced version 2.1 of the `docker-compose.yml` specification. This
+  version must be used with Docker Engine 1.12 or above.
+ - Added support for setting volume labels and network labels in
+ `docker-compose.yml`.
+ - Added support for the `isolation` parameter in service definitions.
+ - Added support for link-local IPs in the service networks definitions.
+ - Added support for shell-style inline defaults in variable interpolation.
+ The supported forms are `${FOO-default}` (fall back if FOO is unset) and
+    `${FOO:-default}` (fall back if FOO is unset or empty); see the sketch
+    after this list.
+
+- Added support for the `group_add` and `oom_score_adj` parameters in
+ service definitions.
+
+- Added support for the `internal` and `enable_ipv6` parameters in network
+ definitions.
+
+- Compose now defaults to using the `npipe` protocol on Windows.
+
+- Overriding a `logging` configuration will now properly merge the `options`
+ mappings if the `driver` values do not conflict.
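A sketch of the inline-default forms (the image tag and variable names are hypothetical):

    version: "2.1"
    services:
      web:
        image: "example/web:${TAG:-latest}"    # "latest" when TAG is unset or empty
        environment:
          LOG_LEVEL: "${LOG_LEVEL-info}"       # "info" only when LOG_LEVEL is unset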
+
+Bug Fixes
+
+- Fixed several bugs related to `npipe` protocol support on Windows.
+
+- Fixed an issue with Windows paths being incorrectly converted when
+ using Docker on Windows Server.
+
+- Fixed a bug where an empty `restart` value would sometimes result in an
+ exception being raised.
+
+- Fixed an issue where service logs containing unicode characters would
+ sometimes cause an error to occur.
+
+- Fixed a bug where unicode values in environment variables would sometimes
+ raise a unicode exception when retrieved.
+
+- Fixed an issue where Compose would incorrectly detect a configuration
+ mismatch for overlay networks.
+
+
+1.8.1 (2016-09-22)
+-----------------
+
+Bug Fixes
+
+- Fixed a bug where users using a credentials store were not able
+ to access their private images.
+
+- Fixed a bug where users using identity tokens to authenticate
+ were not able to access their private images.
+
+- Fixed a bug where an `HttpHeaders` entry in the docker configuration
+ file would cause Compose to crash when trying to build an image.
+
+- Fixed a few bugs related to the handling of Windows paths in volume
+ binding declarations.
+
+- Fixed a bug where Compose would sometimes crash while trying to
+ read a streaming response from the engine.
+
+- Fixed an issue where Compose would crash when encountering an API error
+ while streaming container logs.
+
+- Fixed an issue where Compose would erroneously try to output logs from
+ drivers not handled by the Engine's API.
+
+- Fixed a bug where options from the `docker-machine config` command would
+ not be properly interpreted by Compose.
+
+- Fixed a bug where the connection to the Docker Engine would
+ sometimes fail when running a large number of services simultaneously.
+
+- Fixed an issue where Compose would sometimes print a misleading
+ suggestion message when running the `bundle` command.
+
+- Fixed a bug where connection errors would not be handled properly by
+ Compose during the project initialization phase.
+
+- Fixed a bug where a misleading error would appear when encountering
+ a connection timeout.
+
+
+1.8.0 (2016-06-14)
+-----------------
+
+**Breaking Changes**
+
+- As announced in 1.7.0, `docker-compose rm` now removes containers
+ created by `docker-compose run` by default.
+
+- Setting `entrypoint` on a service now empties out any default
+ command that was set on the image (i.e. any `CMD` instruction in the
+ Dockerfile used to build it). This makes it consistent with
+ the `--entrypoint` flag to `docker run`.
+
+New Features
+
+- Added `docker-compose bundle`, a command that builds a bundle file
+ to be consumed by the new *Docker Stack* commands in Docker 1.12.
+
+- Added `docker-compose push`, a command that pushes service images
+ to a registry.
+
+- Compose now supports specifying a custom TLS version for
+ interaction with the Docker Engine using the `COMPOSE_TLS_VERSION`
+ environment variable.
+
+Bug Fixes
+
+- Fixed a bug where Compose would erroneously try to read `.env`
+ at the project's root when it is a directory.
+
+- `docker-compose run -e VAR` now passes `VAR` through from the shell
+ to the container, as with `docker run -e VAR`.
+
+- Improved config merging when multiple compose files are involved
+ for several service sub-keys.
+
+- Fixed a bug where volume mappings containing Windows drives would
+ sometimes be parsed incorrectly.
+
+- Fixed a bug in Windows environment where volume mappings of the
+ host's root directory would be parsed incorrectly.
+
+- Fixed a bug where `docker-compose config` would output an invalid
+ Compose file if external networks were specified.
+
+- Fixed an issue where unset buildargs would be assigned a string
+ containing `'None'` instead of the expected empty value.
+
+- Fixed a bug where yes/no prompts on Windows would not show before
+ receiving input.
+
+- Fixed a bug where trying to `docker-compose exec` on Windows
+ without the `-d` option would exit with a stacktrace. This will
+ still fail for the time being, but should do so gracefully.
+
+- Fixed a bug where errors during `docker-compose up` would show
+ an unrelated stacktrace at the end of the process.
+
+- `docker-compose create` and `docker-compose start` show more
+ descriptive error messages when something goes wrong.
+
+
+1.7.1 (2016-05-04)
+-----------------
+
+Bug Fixes
+
+- Fixed a bug where the output of `docker-compose config` for v1 files
+ would be an invalid configuration file.
+
+- Fixed a bug where `docker-compose config` would not check the validity
+ of links.
+
+- Fixed an issue where `docker-compose help` would not output a list of
+ available commands and generic options as expected.
+
+- Fixed an issue where filtering by service when using `docker-compose logs`
+ would not apply for newly created services.
+
+- Fixed a bug where unchanged services would sometimes be recreated in
+  the up phase when using Compose with Python 3.
+
+- Fixed an issue where API errors encountered during the up phase would
+ not be recognized as a failure state by Compose.
+
+- Fixed a bug where Compose would raise a NameError because of an undefined
+ exception name on non-Windows platforms.
+
+- Fixed a bug where the wrong version of `docker-py` would sometimes be
+ installed alongside Compose.
+
+- Fixed a bug where the host value output by `docker-machine config default`
+ would not be recognized as valid options by the `docker-compose`
+ command line.
+
+- Fixed an issue where Compose would sometimes exit unexpectedly while
+ reading events broadcasted by a Swarm cluster.
+
+- Corrected a statement in the docs about the location of the `.env` file,
+  which is read from the current directory rather than from the directory
+  containing the Compose file.
+
+
+1.7.0 (2016-04-13)
+------------------
+
+**Breaking Changes**
+
+- `docker-compose logs` no longer follows log output by default. It now
+ matches the behaviour of `docker logs` and exits after the current logs
+ are printed. Use `-f` to get the old default behaviour.
+
+- Booleans are no longer allowed as values for mappings in the Compose file
+ (for keys `environment`, `labels` and `extra_hosts`). Previously this
+ was a warning. Boolean values should be quoted so they become string values.
+
+New Features
+
+- Compose now looks for a `.env` file in the directory where it's run and
+ reads any environment variables defined inside, if they're not already
+ set in the shell environment. This lets you easily set defaults for
+ variables used in the Compose file, or for any of the `COMPOSE_*` or
+ `DOCKER_*` variables.
+
+- Added a `--remove-orphans` flag to both `docker-compose up` and
+ `docker-compose down` to remove containers for services that were removed
+ from the Compose file.
+
+- Added a `--all` flag to `docker-compose rm` to include containers created
+ by `docker-compose run`. This will become the default behavior in the next
+ version of Compose.
+
+- Added support for all the same TLS configuration flags used by the `docker`
+ client: `--tls`, `--tlscert`, `--tlskey`, etc.
+
+- Compose files now support the `tmpfs` and `shm_size` options.
+
+- Added the `--workdir` flag to `docker-compose run`
+
+- `docker-compose logs` now shows logs for new containers that are created
+ after it starts.
+
+- The `COMPOSE_FILE` environment variable can now contain multiple files,
+ separated by the host system's standard path separator (`:` on Mac/Linux,
+ `;` on Windows).
+
+- You can now specify a static IP address when connecting a service to a
+  network with the `ipv4_address` and `ipv6_address` options (see the
+  sketch after this list).
+
+- Added `--follow`, `--timestamp`, and `--tail` flags to the
+ `docker-compose logs` command.
+
+- `docker-compose up`, and `docker-compose start` will now start containers
+ in parallel where possible.
+
+- `docker-compose stop` now stops containers in reverse dependency order
+ instead of all at once.
+
+- Added the `--build` flag to `docker-compose up` to force it to build a new
+ image. It now shows a warning if an image is automatically built when the
+ flag is not used.
+
+- Added the `docker-compose exec` command for executing a process in a running
+ container.
+
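A sketch of static addressing (the subnet and addresses are hypothetical):

    version: "2"
    services:
      app:
        image: example/app
        networks:
          front:
            ipv4_address: 172.16.238.10

    networks:
      front:
        driver: bridge
        ipam:
          config:
            - subnet: 172.16.238.0/24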
+
+Bug Fixes
+
+- `docker-compose down` now removes containers created by
+ `docker-compose run`.
+
+- A more appropriate error is shown when a timeout is hit during `up` when
+ using a tty.
+
+- Fixed a bug in `docker-compose down` where it would abort if some resources
+ had already been removed.
+
+- Fixed a bug where changes to network aliases would not trigger a service
+ to be recreated.
+
+- Fixed a bug where a log message was printed about creating a new volume
+ when it already existed.
+
+- Fixed a bug where interrupting `up` would not always shut down containers.
+
+- Fixed a bug where `log_opt` and `log_driver` were not properly carried over
+ when extending services in the v1 Compose file format.
+
+- Fixed a bug where empty values for build args would cause file validation
+ to fail.
+
+1.6.2 (2016-02-23)
+------------------
+
+- Fixed a bug where connecting to a TLS-enabled Docker Engine would fail with
+ a certificate verification error.
+
+1.6.1 (2016-02-23)
+------------------
+
+Bug Fixes
+
+- Fixed a bug where recreating a container multiple times would cause the
+ new container to be started without the previous volumes.
+
+- Fixed a bug where Compose would set the value of unset environment variables
+ to an empty string, instead of a key without a value.
+
+- Provide a better error message when Compose requires a more recent version
+ of the Docker API.
+
+- Add a missing config field `network.aliases` which allows setting a network
+ scoped alias for a service.
+
+- Fixed a bug where `run` would not start services listed in `depends_on`.
+
+- Fixed a bug where `networks` and `network_mode` were not merged when using
+ extends or multiple Compose files.
+
+- Fixed a bug with service aliases where the short container id alias
+  only contained 10 characters, instead of the 12 characters used in previous
+ versions.
+
+- Added a missing log message when creating a new named volume.
+
+- Fixed a bug where `build.args` was not merged when using `extends` or
+ multiple Compose files.
+
+- Fixed some bugs with config validation when null values or incorrect types
+ were used instead of a mapping.
+
+- Fixed a bug where a `build` section without a `context` would show a stack
+ trace instead of a helpful validation message.
+
+- Improved compatibility with swarm by only setting a container affinity to
+ the previous instance of a services' container when the service uses an
+ anonymous container volume. Previously the affinity was always set on all
+ containers.
+
+- Fixed a bug where the validation of some `driver_opts` would cause an error
+ was used instead of a string.
+
+- Some improvements to the `run.sh` script used by the Compose container install
+ option.
+
+- Fixed a bug with `up --abort-on-container-exit` where Compose would exit,
+ but would not stop other containers.
+
+- Corrected the warning message that is printed when a boolean value is used
+ as a value in a mapping.
+
+
+1.6.0 (2016-01-15)
+------------------
+
+Major Features:
+
+- Compose 1.6 introduces a new format for `docker-compose.yml` which lets
+ you define networks and volumes in the Compose file as well as services. It
+ also makes a few changes to the structure of some configuration options.
+
+ You don't have to use it - your existing Compose files will run on Compose
+ 1.6 exactly as they do today.
+
+ Check the upgrade guide for full details:
+ https://docs.docker.com/compose/compose-file#upgrading
+
+- Support for networking has exited experimental status and is the recommended
+ way to enable communication between containers.
+
+ If you use the new file format, your app will use networking. If you aren't
+ ready yet, just leave your Compose file as it is and it'll continue to work
+ just the same.
+
+ By default, you don't have to configure any networks. In fact, using
+ networking with Compose involves even less configuration than using links.
+ Consult the networking guide for how to use it:
+ https://docs.docker.com/compose/networking
+
+ The experimental flags `--x-networking` and `--x-network-driver`, introduced
+ in Compose 1.5, have been removed.
+
+- You can now pass arguments to a build if you're using the new file format:
+
+ build:
+ context: .
+ args:
+ buildno: 1
+
+- You can now specify both a `build` and an `image` key if you're using the
+ new file format. `docker-compose build` will build the image and tag it with
+ the name you've specified, while `docker-compose pull` will attempt to pull
+ it.
+
+- There's a new `events` command for monitoring container events from
+ the application, much like `docker events`. This is a good primitive for
+ building tools on top of Compose for performing actions when particular
+ things happen, such as containers starting and stopping.
+
+- There's a new `depends_on` option for specifying dependencies between
+ services. This enforces the order of startup, and ensures that when you run
+ `docker-compose up SERVICE` on a service with dependencies, those are started
+ as well.
+
+New Features:
+
+- Added a new command `config` which validates and prints the Compose
+ configuration after interpolating variables, resolving relative paths, and
+ merging multiple files and `extends`.
+
+- Added a new command `create` for creating containers without starting them.
+
+- Added a new command `down` to stop and remove all the resources created by
+ `up` in a single command.
+
+- Added support for the `cpu_quota` configuration option.
+
+- Added support for the `stop_signal` configuration option.
+
+- Commands `start`, `restart`, `pause`, and `unpause` now exit with an
+ error status code if no containers were modified.
+
+- Added a new `--abort-on-container-exit` flag to `up` which causes `up` to
+  stop all containers and exit once the first container exits.
+
+- Removed support for `FIG_FILE`, `FIG_PROJECT_NAME`, and no longer reads
+ `fig.yml` as a default Compose file location.
+
+- Removed the `migrate-to-labels` command.
+
+- Removed the `--allow-insecure-ssl` flag.
+
+
+Bug Fixes:
+
+- Fixed a validation bug that prevented the use of a range of ports in
+ the `expose` field.
+
+- Fixed a validation bug that prevented the use of arrays in the `entrypoint`
+ field if they contained duplicate entries.
+
+- Fixed a bug that caused `ulimits` to be ignored when used with `extends`.
+
+- Fixed a bug that prevented ipv6 addresses in `extra_hosts`.
+
+- Fixed a bug that caused `extends` to be ignored when included from
+ multiple Compose files.
+
+- Fixed an incorrect warning when a container volume was defined in
+ the Compose file.
+
+- Fixed a bug that prevented the force shutdown behaviour of `up` and
+ `logs`.
+
+- Fixed a bug that caused `None` to be printed as the network driver name
+ when the default network driver was used.
+
+- Fixed a bug where using the string form of `dns` or `dns_search` would
+ cause an error.
+
+- Fixed a bug where a container would be reported as "Up" when it was
+ in the restarting state.
+
+- Fixed a confusing error message when DOCKER_CERT_PATH was not set properly.
+
+- Fixed a bug where attaching to a container would fail if it was using a
+ non-standard logging driver (or none at all).
+
+
+1.5.2 (2015-12-03)
+------------------
+
+- Fixed a bug which broke the use of `environment` and `env_file` with
+ `extends`, and caused environment keys without values to have a `None`
+ value, instead of a value from the host environment.
+
+- Fixed a regression in 1.5.1 that caused a warning about volumes to be
+ raised incorrectly when containers were recreated.
+
+- Fixed a bug which prevented building a `Dockerfile` that used `ADD <url>`
+
+- Fixed a bug with `docker-compose restart` which prevented it from
+ starting stopped containers.
+
+- Fixed handling of SIGTERM and SIGINT to properly stop containers
+
+- Add support for using a url as the value of `build`
+
+- Improved the validation of the `expose` option
+
+
+1.5.1 (2015-11-12)
+------------------
+
+- Add the `--force-rm` option to `build`.
+
+- Add the `ulimit` option for services in the Compose file.
+
+- Fixed a bug where `up` would error with "service needs to be built" if
+ a service changed from using `image` to using `build`.
+
+- Fixed a bug that would cause incorrect output of parallel operations
+ on some terminals.
+
+- Fixed a bug that prevented a container from being recreated when the
+ mode of a `volumes_from` was changed.
+
+- Fixed a regression in 1.5.0 where non-utf-8 unicode characters would cause
+ `up` or `logs` to crash.
+
+- Fixed a regression in 1.5.0 where Compose would use a success exit status
+ code when a command fails due to an HTTP timeout communicating with the
+ docker daemon.
+
+- Fixed a regression in 1.5.0 where `name` was being accepted as a valid
+ service option which would override the actual name of the service.
+
+- When using `--x-networking` Compose no longer sets the hostname to the
+ container name.
+
+- When using `--x-networking` Compose will only create the default network
+ if at least one container is using the network.
+
+- When printing logs during `up` or `logs`, flush the output buffer after
+ each line to prevent buffering issues from hiding logs.
+
+- Recreate a container if one of its dependencies is being created.
+  Previously a container was only recreated if its dependencies already
+ existed, but were being recreated as well.
+
+- Add a warning when a `volume` in the Compose file is being ignored
+ and masked by a container volume from a previous container.
+
+- Improve the output of `pull` when run without a tty.
+
+- When using multiple Compose files, validate each before attempting to merge
+  them together. Previously invalid files would result in unhelpful errors.
+
+- Allow dashes in keys in the `environment` service option.
+
+- Improve validation error messages by including the filename as part of the
+ error message.
+
+
+1.5.0 (2015-11-03)
+------------------
+
+**Breaking changes:**
+
+With the introduction of variable substitution support in the Compose file, any
+Compose file that uses an environment variable (`$VAR` or `${VAR}`) in the `command:`
+or `entrypoint:` field will break.
+
+Previously these values were interpolated inside the container, with a value
+from the container environment. In Compose 1.5.0, the values will be
+interpolated on the host, with a value from the host environment.
+
+To migrate a Compose file to 1.5.0, escape the variables with an extra `$`
+(ex: `$$VAR` or `$${VAR}`). See
+https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution
+
+Major features:
+
+- Compose is now available for Windows.
+
+- Environment variables can be used in the Compose file. See
+ https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution
+
+- Multiple compose files can be specified, allowing you to override
+ settings in the default Compose file. See
+ https://github.com/docker/compose/blob/8cc8e61/docs/reference/docker-compose.md
+ for more details.
+
+- Compose now produces better error messages when a file contains
+ invalid configuration.
+
+- `up` now waits for all services to exit before shutting down,
+ rather than shutting down as soon as one container exits.
+
+- Experimental support for the new docker networking system can be
+ enabled with the `--x-networking` flag. Read more here:
+ https://github.com/docker/docker/blob/8fee1c20/docs/userguide/dockernetworks.md
+
+New features:
+
+- You can now optionally pass a mode to `volumes_from`, e.g.
+ `volumes_from: ["servicename:ro"]`.
+
+- Since Docker now lets you create volumes with names, you can refer to those
+ volumes by name in `docker-compose.yml`. For example,
+ `volumes: ["mydatavolume:/data"]` will mount the volume named
+ `mydatavolume` at the path `/data` inside the container.
+
+ If the first component of an entry in `volumes` starts with a `.`, `/` or
+ `~`, it is treated as a path and expansion of relative paths is performed as
+ necessary. Otherwise, it is treated as a volume name and passed straight
+ through to Docker.
+
+ Read more on named volumes and volume drivers here:
+ https://github.com/docker/docker/blob/244d9c33/docs/userguide/dockervolumes.md
+
+- `docker-compose build --pull` instructs Compose to pull the base image for
+ each Dockerfile before building.
+
+- `docker-compose pull --ignore-pull-failures` instructs Compose to continue
+ if it fails to pull a single service's image, rather than aborting.
+
+- You can now specify an IPC namespace in `docker-compose.yml` with the `ipc`
+ option.
+
+- Containers created by `docker-compose run` can now be named with the
+ `--name` flag.
+
+- If you install Compose with pip or use it as a library, it now works with
+ Python 3.
+
+- `image` now supports image digests (in addition to ids and tags), e.g.
+ `image: "busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d"`
+
+- `ports` now supports ranges of ports, e.g.
+
+ ports:
+ - "3000-3005"
+ - "9000-9001:8000-8001"
+
+- `docker-compose run` now supports a `-p|--publish` parameter, much like
+ `docker run -p`, for publishing specific ports to the host.
+
+- `docker-compose pause` and `docker-compose unpause` have been implemented,
+ analogous to `docker pause` and `docker unpause`.
+
+- When using `extends` to copy configuration from another service in the same
+ Compose file, you can omit the `file` option.
+
+- Compose can be installed and run as a Docker image. This is an experimental
+ feature.
+
+Bug fixes:
+
+- All values for the `log_driver` option which are supported by the Docker
+ daemon are now supported by Compose.
+
+- `docker-compose build` can now be run successfully against a Swarm cluster.
+
+
+1.4.2 (2015-09-22)
+------------------
+
+- Fixed a regression in the 1.4.1 release that would cause `docker-compose up`
+ without the `-d` option to exit immediately.
+
+1.4.1 (2015-09-10)
+------------------
+
+The following bugs have been fixed:
+
+- Some configuration changes (notably changes to `links`, `volumes_from`, and
+ `net`) were not properly triggering a container recreate as part of
+ `docker-compose up`.
+- `docker-compose up <service>` was showing logs for all services instead of
+ just the specified services.
+- Containers with custom container names were showing up in logs as
+ `service_number` instead of their custom container name.
+- When scaling a service sometimes containers would be recreated even when
+ the configuration had not changed.
+
+
+1.4.0 (2015-08-04)
+------------------
+
+- By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications.
+
+ The experimental `--x-smart-recreate` flag which introduced this feature in Compose 1.3.0 has been removed, and a `--force-recreate` flag has been added for when you want to recreate everything.
+
+- Several of Compose's commands - `scale`, `stop`, `kill` and `rm` - now perform actions on multiple containers in parallel, rather than in sequence, making them much faster on larger applications.
+
+- You can now specify a custom name for a service's container with `container_name`. Because Docker container names must be unique, this means you can't scale the service beyond one container.
+
+- You no longer have to specify a `file` option when using `extends` - it will default to the current file.
+
+- Service names can now contain dots, dashes and underscores.
+
+- Compose can now read YAML configuration from standard input, rather than from a file, by specifying `-` as the filename. This makes it easier to generate configuration dynamically:
+
+ $ echo 'redis: {"image": "redis"}' | docker-compose --file - up
+
+- There's a new `docker-compose version` command which prints extended information about Compose's bundled dependencies.
+
+- `docker-compose.yml` now supports `log_opt` as well as `log_driver`, allowing you to pass extra configuration to a service's logging driver.
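+
+  A minimal sketch (the driver and its options are illustrative):
+
+      web:
+        image: busybox
+        log_driver: syslog
+        log_opt:
+          syslog-address: "tcp://192.168.0.42:514"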
+
+- `docker-compose.yml` now supports `memswap_limit`, similar to `docker run --memory-swap`.
+
+- When mounting volumes with the `volumes` option, you can now pass in any mode supported by the daemon, not just `:ro` or `:rw`. For example, SELinux users can pass `:z` or `:Z`.
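+
+  For example (paths are illustrative):
+
+      volumes:
+        - /host/data:/container/data:z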
+
+- You can now specify a custom volume driver with the `volume_driver` option in `docker-compose.yml`, much like `docker run --volume-driver`.
+
+- A bug has been fixed where Compose would fail to pull images from private registries serving plain (unsecured) HTTP. The `--allow-insecure-ssl` flag, which was previously used to work around this issue, has been deprecated and now has no effect.
+
+- A bug has been fixed where `docker-compose build` would fail if the build depended on a private Hub image or an image from a private registry.
+
+- A bug has been fixed where Compose would crash if there were containers which the Docker daemon had not finished removing.
+
+- Two bugs have been fixed where Compose would sometimes fail with a "Duplicate bind mount" error, or fail to attach volumes to a container, if there was a volume path specified in `docker-compose.yml` with a trailing slash.
+
+Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden!
+
+1.3.3 (2015-07-15)
+------------------
+
+Two regressions have been fixed:
+
+- When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time.
+- Compose would sometimes crash depending on the formatting of container data returned from the Docker API.
+
+1.3.2 (2015-07-14)
+------------------
+
+The following bugs have been fixed:
+
+- When there were one-off containers created by running `docker-compose run` on an older version of Compose, `docker-compose run` would fail with a name collision. Compose now shows an error if you have leftover containers of this type lying around, and tells you how to remove them.
+- Compose was not reading Docker authentication config files created in the new location, `~/.docker/config.json`, and authentication against private registries would therefore fail.
+- When a container had a pseudo-TTY attached, its output in `docker-compose up` would be truncated.
+- `docker-compose up --x-smart-recreate` would sometimes fail when an image tag was updated.
+- `docker-compose up` would sometimes create two containers with the same numeric suffix.
+- `docker-compose rm` and `docker-compose ps` would sometimes list services that aren't part of the current project (though no containers were erroneously removed).
+- Some `docker-compose` commands would not show an error if invalid service names were passed in.
+
+Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens!
+
+1.3.1 (2015-06-21)
+------------------
+
+The following bugs have been fixed:
+
+- `docker-compose build` would always attempt to pull the base image before building.
+- `docker-compose help migrate-to-labels` failed with an error.
+- If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode.
+
+1.3.0 (2015-06-18)
+------------------
+
+Firstly, two important notes:
+
+- **This release contains breaking changes, and you will need to either remove or migrate your existing containers before running your app** - see the [upgrading section of the install docs](https://github.com/docker/compose/blob/1.3.0rc1/docs/install.md#upgrading) for details.
+
+- Compose now requires Docker 1.6.0 or later.
+
+We've done a lot of work in this release to remove hacks and make Compose more stable:
+
+- Compose now uses container labels, rather than names, to keep track of containers. This makes Compose both faster and easier to integrate with your own tools.
+
+- Compose no longer uses "intermediate containers" when recreating containers for a service. This makes `docker-compose up` less complex and more resilient to failure.
+
+There are some new features:
+
+- `docker-compose up` has an **experimental** new behaviour: it will only recreate containers for services whose configuration has changed in `docker-compose.yml`. This will eventually become the default, but for now you can take it for a spin:
+
+ $ docker-compose up --x-smart-recreate
+
+- When invoked in a subdirectory of a project, `docker-compose` will now climb up through parent directories until it finds a `docker-compose.yml`.
+
+Several new configuration keys have been added to `docker-compose.yml` (a combined sketch follows the list):
+
+- `dockerfile`, like `docker build --file`, lets you specify an alternate Dockerfile to use with `build`.
+- `labels`, like `docker run --label`, lets you add custom metadata to containers.
+- `extra_hosts`, like `docker run --add-host`, lets you add entries to a container's `/etc/hosts` file.
+- `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine.
+- `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in.
+- `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only.
+- `security_opt`, like `docker run --security-opt`, lets you specify [security options](https://docs.docker.com/engine/reference/run/#security-configuration).
+- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](https://docs.docker.com/engine/reference/run/#logging-drivers-log-driver).
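+
+A minimal sketch combining a few of these keys (all values are
+illustrative, not recommendations):
+
+    web:
+      build: .
+      dockerfile: Dockerfile.dev
+      labels:
+        com.example.description: "Example web service"
+      extra_hosts:
+        - "somehost:162.242.195.82"
+      read_only: true
+      cpuset: "0,1"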
+
+Many bugs have been fixed, including the following:
+
+- The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins.
+- A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`.
+- Authenticating against third-party registries would sometimes fail.
+- `docker-compose run --rm` would fail to remove the container if the service had a `restart` policy in place.
+- `docker-compose scale` would refuse to scale a service beyond 1 container if it exposed a specific port number on the host.
+- Compose would refuse to create multiple volume entries with the same host path.
+
+Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily!
+
+1.2.0 (2015-04-16)
+------------------
+
+- `docker-compose.yml` now supports an `extends` option, which enables a service to inherit configuration from another service in another configuration file. This is really good for sharing common configuration between apps, or for configuring the same app for different environments. Here's the [documentation](https://github.com/docker/compose/blob/master/docs/yml.md#extends).
+
+- When using Compose with a Swarm cluster, containers that depend on one another will be co-scheduled on the same node. This means that most Compose apps will now work out of the box, as long as they don't use `build`.
+
+- Repeated invocations of `docker-compose up` when using Compose with a Swarm cluster now work reliably.
+
+- Directories passed to `build`, filenames passed to `env_file` and volume host paths passed to `volumes` are now treated as relative to the *directory of the configuration file*, not the directory that `docker-compose` is being run in. In the majority of cases, those are the same, but if you use the `-f|--file` argument to specify a configuration file in another directory, **this is a breaking change**.
+
+- A service can now share another service's network namespace with `net: container:<service>`.
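+
+  A minimal sketch (service names are illustrative):
+
+      web:
+        image: busybox
+      debug:
+        image: busybox
+        net: "container:web"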
+
+- `volumes_from` and `net: container:<service>` entries are taken into account when resolving dependencies, so `docker-compose up <service>` will correctly start all dependencies of `<service>`.
+
+- `docker-compose run` now accepts a `--user` argument to specify a user to run the command as, just like `docker run`.
+
+- The `up`, `stop` and `restart` commands now accept a `--timeout` (or `-t`) argument to specify how long to wait when attempting to gracefully stop containers, just like `docker stop`.
+
+- `docker-compose rm` now accepts `-f` as a shorthand for `--force`, just like `docker rm`.
+
+Thanks, @abesto, @albers, @alunduil, @dnephin, @funkyfuture, @gilclark, @IanVS, @KingsleyKelly, @knutwalker, @thaJeztah and @vmalloc!
+
+1.1.0 (2015-02-25)
+------------------
+
+Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you:
+
+- The command you type is now `docker-compose`, not `fig`.
+- You should rename your fig.yml to docker-compose.yml.
+- If you’re installing via PyPI, the package is now `docker-compose`, so install it with `pip install docker-compose`.
+
+Besides that, there’s a lot of new stuff in this release:
+
+- We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet.
+
+- `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for e.g. running your webapp with an interactive debugger.
+
+- You can now link to containers outside your app with the `external_links` option in docker-compose.yml.
+
+- You can now prevent `docker-compose up` from automatically building images with the `--no-build` option. This will make fewer API calls and run faster.
+
+- If you don’t specify a tag when using the `image` key, Compose will default to the `latest` tag, rather than pulling all tags.
+
+- `docker-compose kill` now supports the `-s` flag, allowing you to specify the exact signal you want to send to a service’s containers.
+
+- docker-compose.yml now has an `env_file` key, analogous to `docker run --env-file`, letting you specify multiple environment variables in a separate file. This is great if you have a lot of them, or if you want to keep sensitive information out of version control.
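+
+  A minimal sketch (the file name and its contents are illustrative):
+
+      web:
+        image: busybox
+        env_file: .env.web
+
+  where `.env.web` contains one variable per line, e.g. `RACK_ENV=development`.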
+
+- docker-compose.yml now supports the `dns_search`, `cap_add`, `cap_drop`, `cpu_shares` and `restart` options, analogous to `docker run`’s `--dns-search`, `--cap-add`, `--cap-drop`, `--cpu-shares` and `--restart` options.
+
+- Compose now ships with Bash tab completion - see the installation and usage docs at https://github.com/docker/compose/blob/1.1.0/docs/completion.md
+
+- A number of bugs have been fixed - see the milestone for details: https://github.com/docker/compose/issues?q=milestone%3A1.1.0+
+
+Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe!
+
+1.0.1 (2014-11-04)
+------------------
+
+ - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries.
+ - Fixed `fig run` not showing output in Jenkins.
+ - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs.
+
+1.0.0 (2014-10-16)
+------------------
+
+The highlights:
+
+ - [Fig has joined Docker.](https://www.orchardup.com/blog/orchard-is-joining-docker) Fig will continue to be maintained, but we'll also be incorporating the best bits of Fig into Docker itself.
+
+ This means the GitHub repository has moved to [https://github.com/docker/fig](https://github.com/docker/fig) and our IRC channel is now #docker-fig on Freenode.
+
+ - Fig can be used with the [official Docker OS X installer](https://docs.docker.com/installation/mac/). Boot2Docker will mount the home directory from your host machine so volumes work as expected.
+
+ - Fig supports Docker 1.3.
+
+ - It is now possible to connect to the Docker daemon using TLS by using the `DOCKER_CERT_PATH` and `DOCKER_TLS_VERIFY` environment variables.
+
+ - There is a new `fig port` command which outputs the host port binding of a service, in a similar way to `docker port`.
+
+ - There is a new `fig pull` command which pulls the latest images for a service.
+
+ - There is a new `fig restart` command which restarts a service's containers.
+
+ - Fig creates multiple containers in a service by appending a number to the service name (e.g. `db_1`, `db_2`, etc.). As a convenience, Fig will now give the first container an alias of the service name (e.g. `db`).
+
+ This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly.
+
+ - Volume definitions now support `ro` mode, `~` expansion and environment variable expansion.
+
+ - `.dockerignore` is supported when building.
+
+ - The project name can be set with the `FIG_PROJECT_NAME` environment variable.
+
+ - The `--env` and `--entrypoint` options have been added to `fig run`.
+
+ - The Fig binary for Linux is now linked against an older version of glibc so it works on CentOS 6 and Debian Wheezy.
+
+Other things:
+
+ - `fig ps` now works on Jenkins and makes fewer API calls to the Docker daemon.
+ - `--verbose` displays more useful debugging output.
+ - When starting a service where `volumes_from` points to a service without any containers running, that service will now be started.
+ - Lots of docs improvements. Notably, environment variables are documented and official repositories are used throughout.
+
+Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew.
+
+0.5.2 (2014-07-28)
+------------------
+
+ - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`.
+ - Fixed the `dns:` fig.yml option, which was causing fig to error out.
+ - Fixed a bug where fig couldn't start under Python 2.6.
+ - Fixed a log-streaming bug that occasionally caused fig to exit.
+
+Thanks @dnephin and @marksteve!
+
+
+0.5.1 (2014-07-11)
+------------------
+
+ - If a service has a command defined, `fig run [service]` with no further arguments will run it.
+ - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different).
+ - `volumes_from` now works properly with containers as well as services.
+ - Fixed a race condition when recreating containers in `fig up`.
+
+Thanks @ryanbrainard and @d11wtq!
+
+
+0.5.0 (2014-07-11)
+------------------
+
+ - Fig now starts links when you run `fig run` or `fig up`.
+
+ For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service.
+
+ - Environment variables can now be resolved from the environment that Fig is running in. Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved:
+ ```
+ environment:
+ RACK_ENV: development
+ SESSION_SECRET:
+ ```
+
+ - `volumes_from` is now supported in `fig.yml`. All of the volumes from the specified services and containers will be mounted:
+
+ ```
+ volumes_from:
+ - service_name
+ - container_name
+ ```
+
+ - A host address can now be specified in `ports`:
+
+ ```
+ ports:
+ - "0.0.0.0:8000:8000"
+ - "127.0.0.1:8001:8001"
+ ```
+
+ - The `net` and `workdir` options are now supported in `fig.yml`.
+ - The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option.
+ - TTY behaviour is far more robust, and resizes are supported correctly.
+ - Load YAML files safely.
+
+Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release!
+
+
+0.4.2 (2014-06-18)
+------------------
+
+ - Fix various encoding errors when using `fig run`, `fig up` and `fig build`.
+
+0.4.1 (2014-05-08)
+------------------
+
+ - Add support for Docker 0.11.0. (Thanks @marksteve!)
+ - Make project name configurable. (Thanks @jefmathiot!)
+ - Return correct exit code from `fig run`.
+
+0.4.0 (2014-04-29)
+------------------
+
+ - Support Docker 0.9 and 0.10
+ - Display progress bars correctly when pulling images (no more ski slopes)
+ - `fig up` now stops all services when any container exits
+ - Added support for the `privileged` config option in fig.yml (thanks @kvz!)
+ - Shortened and aligned log prefixes in `fig up` output
+ - Only containers started with `fig run` link back to their own service
+ - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!)
+ - Error message improvements
+
+0.3.2 (2014-03-05)
+------------------
+
+ - Added an `--rm` option to `fig run`. (Thanks @marksteve!)
+ - Added an `expose` option to `fig.yml`.
+
+0.3.1 (2014-03-04)
+------------------
+
+ - Added contribution instructions. (Thanks @kvz!)
+ - Fixed `fig rm` throwing an error.
+ - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command.
+
+0.3.0 (2014-03-03)
+------------------
+
+ - We now ship binaries for OS X and Linux. No more having to install with Pip!
+ - Add `-f` flag to specify alternate `fig.yml` files
+ - Add support for custom link names
+ - Fix a bug where recreating would sometimes hang
+ - Update docker-py to support Docker 0.8.0.
+ - Various documentation improvements
+ - Various error message improvements
+
+Thanks @marksteve, @Gazler and @teozkr!
+
+0.2.2 (2014-02-17)
+------------------
+
+ - Resolve dependencies using Cormen/Tarjan topological sort
+ - Fix `fig up` not printing log output
+ - Stop containers in reverse order to starting
+ - Fix scale command not binding ports
+
+Thanks to @barnybug and @dustinlacewell for their work on this release.
+
+0.2.1 (2014-02-04)
+------------------
+
+ - General improvements to error reporting (#77, #79)
+
+0.2.0 (2014-01-31)
+------------------
+
+ - Link services to themselves so run commands can access the running service. (#67)
+ - Much better documentation.
+ - Make service dependency resolution more reliable. (#48)
+ - Load Fig configurations with a `.yaml` extension. (#58)
+
+Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release.
+
+0.1.4 (2014-01-27)
+------------------
+
+ - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54)
+
+0.1.3 (2014-01-23)
+------------------
+
+ - Fix ports sometimes being configured incorrectly. (#46)
+ - Fix log output sometimes not displaying. (#47)
+
+0.1.2 (2014-01-22)
+------------------
+
+ - Add `-T` option to `fig run` to disable pseudo-TTY. (#34)
+ - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. (#33) Thanks @cameronmaske!
+ - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40)
+
+0.1.1 (2014-01-17)
+------------------
+
+ - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell!
+
+0.1.0 (2014-01-16)
+------------------
+
+ - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2)
+ - Add `fig scale` command (#9)
+ - Use `DOCKER_HOST` environment variable to find Docker daemon, for consistency with the official Docker client (was previously `DOCKER_URL`) (#19)
+ - Truncate long commands in `fig ps` (#18)
+ - Fill out CLI help banners for commands (#15, #16)
+ - Show a friendlier error when `fig.yml` is missing (#4)
+ - Fix bug with `fig build` logging (#3)
+ - Fix bug where builds would time out if a step took a long time without generating output (#6)
+ - Fix bug where streaming container output over the Unix socket raised an error (#7)
+
+Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt.
+
+0.0.2 (2014-01-02)
+------------------
+
+ - Improve documentation
+ - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`.
+ - Improve `fig up` behaviour
+ - Add confirmation prompt to `fig rm`
+ - Add `fig build` command
+
+0.0.1 (2013-12-20)
+------------------
+
+Initial release.
diff --git a/CHANGES.md b/CHANGES.md
new file mode 120000
index 00000000..83b69470
--- /dev/null
+++ b/CHANGES.md
@@ -0,0 +1 @@
+CHANGELOG.md \ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..16bccf98
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,74 @@
+# Contributing to Compose
+
+Compose is a part of the Docker project, and follows the same rules and
+principles. Take a read of [Docker's contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md)
+to get an overview.
+
+## TL;DR
+
+Pull requests will need:
+
+ - Tests
+ - Documentation
+ - [To be signed off](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work)
+ - A logical series of [well written commits](https://github.com/alphagov/styleguides/blob/master/git.md)
+
+## Development environment
+
+If you're looking to contribute to Compose
+but you're new to the project or maybe even to Python, here are the steps
+that should get you started.
+
+1. Fork [https://github.com/docker/compose](https://github.com/docker/compose)
+ to your username.
+2. Clone your forked repository locally `git clone git@github.com:yourusername/compose.git`.
+3. You must [configure a remote](https://help.github.com/articles/configuring-a-remote-for-a-fork/) for your fork so that you can [sync changes you make](https://help.github.com/articles/syncing-a-fork/) with the original repository.
+4. Enter the local directory `cd compose`.
+5. Set up a development environment by running `python setup.py develop`. This
+ will install the dependencies and set up a symlink from your `docker-compose`
+ executable to the checkout of the repository. When you now run
+ `docker-compose` from anywhere on your machine, it will run your development
+ version of Compose.
+
+## Install pre-commit hooks
+
+This step is optional, but recommended. Pre-commit hooks will run style checks
+and in some cases fix style issues for you when you commit code.
+
+Install the git pre-commit hooks using [tox](https://tox.readthedocs.io) by
+running `tox -e pre-commit` or by following the
+[pre-commit install guide](http://pre-commit.com/#install).
+
+To run the style checks at any time run `tox -e pre-commit`.
+
+## Submitting a pull request
+
+See Docker's [basic contribution workflow](https://docs.docker.com/opensource/workflow/make-a-contribution/#the-basic-contribution-workflow) for a guide on how to submit a pull request for code or documentation.
+
+## Running the test suite
+
+Use the test script to run linting checks and then the full test suite against
+different Python interpreters:
+
+ $ script/test/default
+
+Tests are run against a Docker daemon inside a container, so that we can test
+against multiple Docker versions. By default they'll run against only the latest
+Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run
+against all supported versions:
+
+ $ DOCKER_VERSIONS=all script/test/default
+
+Arguments to `script/test/default` are passed through to the `tox` executable, so
+you can specify a test directory, file, module, class or method:
+
+ $ script/test/default tests/unit
+ $ script/test/default tests/unit/cli_test.py
+ $ script/test/default tests/unit/config_test.py::ConfigTest
+ $ script/test/default tests/unit/config_test.py::ConfigTest::test_load
+
+## Finding things to work on
+
+We use a [ZenHub board](https://www.zenhub.io/) to keep track of specific things we are working on and planning to work on. If you're looking for things to work on, stuff in the backlog is a great place to start.
+
+For more information about our project planning, take a look at our [GitHub wiki](https://github.com/docker/compose/wiki).
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..154d5151
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,84 @@
+FROM debian:wheezy
+
+RUN set -ex; \
+ apt-get update -qq; \
+ apt-get install -y \
+ locales \
+ gcc \
+ make \
+ zlib1g \
+ zlib1g-dev \
+ libssl-dev \
+ git \
+ ca-certificates \
+ curl \
+ libsqlite3-dev \
+ libbz2-dev \
+ ; \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl https://get.docker.com/builds/Linux/x86_64/docker-1.8.3 \
+ -o /usr/local/bin/docker && \
+ SHA256=f024bc65c45a3778cf07213d26016075e8172de8f6e4b5702bedde06c241650f; \
+ echo "${SHA256} /usr/local/bin/docker" | sha256sum -c - && \
+ chmod +x /usr/local/bin/docker
+
+# Build Python 2.7.13 from source
+RUN set -ex; \
+ curl -LO https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz && \
+ SHA256=a4f05a0720ce0fd92626f0278b6b433eee9a6173ddf2bced7957dfb599a5ece1; \
+ echo "${SHA256} Python-2.7.13.tgz" | sha256sum -c - && \
+ tar -xzf Python-2.7.13.tgz; \
+ cd Python-2.7.13; \
+ ./configure --enable-shared; \
+ make; \
+ make install; \
+ cd ..; \
+ rm -rf /Python-2.7.13; \
+ rm Python-2.7.13.tgz
+
+# Build python 3.4 from source
+RUN set -ex; \
+ curl -LO https://www.python.org/ftp/python/3.4.6/Python-3.4.6.tgz && \
+ SHA256=fe59daced99549d1d452727c050ae486169e9716a890cffb0d468b376d916b48; \
+ echo "${SHA256} Python-3.4.6.tgz" | sha256sum -c - && \
+ tar -xzf Python-3.4.6.tgz; \
+ cd Python-3.4.6; \
+ ./configure --enable-shared; \
+ make; \
+ make install; \
+ cd ..; \
+ rm -rf /Python-3.4.6; \
+ rm Python-3.4.6.tgz
+
+# Make libpython findable
+ENV LD_LIBRARY_PATH /usr/local/lib
+
+# Install pip
+RUN set -ex; \
+ curl -LO https://bootstrap.pypa.io/get-pip.py && \
+ SHA256=19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c; \
+ echo "${SHA256} get-pip.py" | sha256sum -c - && \
+ python get-pip.py
+
+# Python3 requires a valid locale
+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
+ENV LANG en_US.UTF-8
+
+RUN useradd -d /home/user -m -s /bin/bash user
+WORKDIR /code/
+
+RUN pip install tox==2.1.1
+
+ADD requirements.txt /code/
+ADD requirements-dev.txt /code/
+ADD .pre-commit-config.yaml /code/
+ADD setup.py /code/
+ADD tox.ini /code/
+ADD compose /code/compose/
+RUN tox --notest
+
+ADD . /code/
+RUN chown -R user /code/
+
+ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
new file mode 100644
index 00000000..9fd69715
--- /dev/null
+++ b/Dockerfile.armhf
@@ -0,0 +1,71 @@
+FROM armhf/debian:wheezy
+
+RUN set -ex; \
+ apt-get update -qq; \
+ apt-get install -y \
+ locales \
+ gcc \
+ make \
+ zlib1g \
+ zlib1g-dev \
+ libssl-dev \
+ git \
+ ca-certificates \
+ curl \
+ libsqlite3-dev \
+ libbz2-dev \
+ ; \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl https://get.docker.com/builds/Linux/armel/docker-1.8.3 \
+ -o /usr/local/bin/docker && \
+ chmod +x /usr/local/bin/docker
+
+# Build Python 2.7.13 from source
+RUN set -ex; \
+ curl -L https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz | tar -xz; \
+ cd Python-2.7.13; \
+ ./configure --enable-shared; \
+ make; \
+ make install; \
+ cd ..; \
+ rm -rf /Python-2.7.13
+
+# Build python 3.4 from source
+RUN set -ex; \
+ curl -L https://www.python.org/ftp/python/3.4.6/Python-3.4.6.tgz | tar -xz; \
+ cd Python-3.4.6; \
+ ./configure --enable-shared; \
+ make; \
+ make install; \
+ cd ..; \
+ rm -rf /Python-3.4.6
+
+# Make libpython findable
+ENV LD_LIBRARY_PATH /usr/local/lib
+
+# Install pip
+RUN set -ex; \
+ curl -L https://bootstrap.pypa.io/get-pip.py | python
+
+# Python3 requires a valid locale
+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
+ENV LANG en_US.UTF-8
+
+RUN useradd -d /home/user -m -s /bin/bash user
+WORKDIR /code/
+
+RUN pip install tox==2.1.1
+
+ADD requirements.txt /code/
+ADD requirements-dev.txt /code/
+ADD .pre-commit-config.yaml /code/
+ADD setup.py /code/
+ADD tox.ini /code/
+ADD compose /code/compose/
+RUN tox --notest
+
+ADD . /code/
+RUN chown -R user /code/
+
+ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
diff --git a/Dockerfile.run b/Dockerfile.run
new file mode 100644
index 00000000..5d246e9e
--- /dev/null
+++ b/Dockerfile.run
@@ -0,0 +1,14 @@
+FROM alpine:3.4
+
+ENV GLIBC 2.23-r3
+
+RUN apk update && apk add --no-cache openssl ca-certificates && \
+ wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
+ wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
+ apk add --no-cache glibc-$GLIBC.apk && rm glibc-$GLIBC.apk && \
+ ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
+ ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib
+
+COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
+
+ENTRYPOINT ["docker-compose"]
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
new file mode 100644
index 00000000..3b19bb39
--- /dev/null
+++ b/Dockerfile.s390x
@@ -0,0 +1,15 @@
+FROM s390x/alpine:3.6
+
+ARG COMPOSE_VERSION=1.16.1
+
+RUN apk add --update --no-cache \
+ python \
+ py-pip \
+ && pip install --no-cache-dir docker-compose==$COMPOSE_VERSION \
+ && rm -rf /var/cache/apk/*
+
+WORKDIR /data
+VOLUME /data
+
+
+ENTRYPOINT ["docker-compose"]
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 00000000..51136b1f
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,64 @@
+#!groovy
+
+def image
+
+def buildImage = { ->
+ wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
+ stage("build image") {
+ checkout(scm)
+ def imageName = "dockerbuildbot/compose:${gitCommit()}"
+ image = docker.image(imageName)
+ try {
+ image.pull()
+ } catch (Exception exc) {
+ image = docker.build(imageName, ".")
+ image.push()
+ }
+ }
+ }
+}
+
+def runTests = { Map settings ->
+ def dockerVersions = settings.get("dockerVersions", null)
+ def pythonVersions = settings.get("pythonVersions", null)
+
+ if (!pythonVersions) {
+ throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py34')`")
+ }
+ if (!dockerVersions) {
+ throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
+ }
+
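+    // runTests returns a closure rather than running immediately, so each
+    // python/docker version combination can be scheduled as a parallel branch.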
+ { ->
+ wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
+ stage("test python=${pythonVersions} / docker=${dockerVersions}") {
+ checkout(scm)
+ def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
+ echo "Using local system's storage driver: ${storageDriver}"
+ sh """docker run \\
+ -t \\
+ --rm \\
+ --privileged \\
+ --volume="\$(pwd)/.git:/code/.git" \\
+ --volume="/var/run/docker.sock:/var/run/docker.sock" \\
+ -e "TAG=${image.id}" \\
+ -e "STORAGE_DRIVER=${storageDriver}" \\
+ -e "DOCKER_VERSIONS=${dockerVersions}" \\
+ -e "BUILD_NUMBER=\$BUILD_TAG" \\
+ -e "PY_TEST_VERSIONS=${pythonVersions}" \\
+ --entrypoint="script/ci" \\
+ ${image.id} \\
+ --verbose
+ """
+ }
+ }
+ }
+}
+
+buildImage()
+// TODO: break this out into meaningful "DOCKER_VERSIONS" values instead of all
+parallel(
+ failFast: true,
+ all_py27: runTests(pythonVersions: "py27", dockerVersions: "all"),
+ all_py34: runTests(pythonVersions: "py34", dockerVersions: "all"),
+)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..27448585
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MAINTAINERS b/MAINTAINERS
new file mode 100644
index 00000000..89f5b412
--- /dev/null
+++ b/MAINTAINERS
@@ -0,0 +1,52 @@
+# Compose maintainers file
+#
+# This file describes who runs the docker/compose project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "aanand",
+ "bfirsh",
+ "dnephin",
+ "mnowster",
+ "shin-",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.aanand]
+ Name = "Aanand Prasad"
+ Email = "aanand.prasad@gmail.com"
+ GitHub = "aanand"
+
+ [people.bfirsh]
+ Name = "Ben Firshman"
+ Email = "ben@firshman.co.uk"
+ GitHub = "bfirsh"
+
+ [people.dnephin]
+ Name = "Daniel Nephin"
+ Email = "dnephin@gmail.com"
+ GitHub = "dnephin"
+
+ [people.mnowster]
+ Name = "Mazz Mosley"
+ Email = "mazz@houseofmnowster.com"
+ GitHub = "mnowster"
+
+ [people.shin-]
+ Name = "Joffrey F"
+ Email = "joffrey@docker.com"
+ GitHub = "shin-"
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..8c6f932b
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,15 @@
+include Dockerfile
+include LICENSE
+include requirements.txt
+include requirements-dev.txt
+include tox.ini
+include *.md
+exclude README.md
+include README.rst
+include compose/config/*.json
+include compose/GITSHA
+recursive-include contrib/completion *
+recursive-include tests *
+global-exclude *.pyc
+global-exclude *.pyo
+global-exclude *.un~
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..e3ca8f83
--- /dev/null
+++ b/README.md
@@ -0,0 +1,65 @@
+Docker Compose
+==============
+![Docker Compose](logo.png?raw=true "Docker Compose Logo")
+
+Compose is a tool for defining and running multi-container Docker applications.
+With Compose, you use a Compose file to configure your application's services.
+Then, using a single command, you create and start all the services
+from your configuration. To learn more about all the features of Compose,
+see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#features).
+
+Compose is great for development, testing, and staging environments, as well as
+CI workflows. You can learn more about each case in
+[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#common-use-cases).
+
+Using Compose is basically a three-step process.
+
+1. Define your app's environment with a `Dockerfile` so it can be
+reproduced anywhere.
+2. Define the services that make up your app in `docker-compose.yml` so
+they can be run together in an isolated environment.
+3. Lastly, run `docker-compose up` and Compose will start and run your entire app.
+
+A `docker-compose.yml` looks like this:
+
+ version: '2'
+
+ services:
+ web:
+ build: .
+ ports:
+ - "5000:5000"
+ volumes:
+ - .:/code
+ redis:
+ image: redis
+
+For more information about the Compose file, see the
+[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md).
+
+Compose has commands for managing the whole lifecycle of your application:
+
+ * Start, stop and rebuild services
+ * View the status of running services
+ * Stream the log output of running services
+ * Run a one-off command on a service
+
+Installation and documentation
+------------------------------
+
+- Full documentation is available on [Docker's website](https://docs.docker.com/compose/).
+- If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
+- The code repository for Compose is on [GitHub](https://github.com/docker/compose)
+- If you find any problems, please file an [issue](https://github.com/docker/compose/issues/new)
+
+Contributing
+------------
+
+[![Build Status](https://jenkins.dockerproject.org/buildStatus/icon?job=docker/compose/master)](https://jenkins.dockerproject.org/job/docker/job/compose/job/master/)
+
+Want to help build Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md).
+
+Releasing
+---------
+
+Releases are built by maintainers, following an outline of the [release process](https://github.com/docker/compose/blob/master/project/RELEASE-PROCESS.md).
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 00000000..c2184e56
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,32 @@
+# Roadmap
+
+## An even better tool for development environments
+
+Compose is a great tool for development environments, but it could be even better. For example:
+
+- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
+
+## More than just development environments
+
+Compose currently works really well in development, but we want to make the Compose file format better for test, staging, and production environments. To support these use cases, there will need to be improvements to the file format, improvements to the command-line tool, integrations with other tools, and perhaps new tools altogether.
+
+Some specific things we are considering:
+
+- Compose currently will attempt to get your application into the correct state when running `up`, but it has a number of shortcomings:
+ - It should roll back to a known good state if it fails.
+ - It should allow a user to check the actions it is about to perform before running them.
+- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports, volume mount paths, or volume drivers. ([#1377](https://github.com/docker/compose/issues/1377))
+- Compose should recommend a technique for zero-downtime deploys. ([#1786](https://github.com/docker/compose/issues/1786))
+- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
+
+## Integration with Swarm
+
+Compose should integrate really well with Swarm so you can take an application you've developed on your laptop and run it on a Swarm cluster.
+
+The current state of integration is documented in [SWARM.md](SWARM.md).
+
+## Applications spanning multiple teams
+
+Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well.
+
+There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318).
diff --git a/SWARM.md b/SWARM.md
new file mode 100644
index 00000000..c6f378a9
--- /dev/null
+++ b/SWARM.md
@@ -0,0 +1 @@
+This file has moved to: https://docs.docker.com/compose/swarm/
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 00000000..e4f39544
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,24 @@
+
+version: '{branch}-{build}'
+
+install:
+ - "SET PATH=C:\\Python27-x64;C:\\Python27-x64\\Scripts;%PATH%"
+ - "python --version"
+ - "pip install tox==2.1.1 virtualenv==13.1.2"
+
+# Build the binary after tests
+build: false
+
+test_script:
+ - "tox -e py27,py34 -- tests/unit"
+ - ps: ".\\script\\build\\windows.ps1"
+
+artifacts:
+ - path: .\dist\docker-compose-Windows-x86_64.exe
+ name: "Compose Windows binary"
+
+deploy:
+ - provider: Environment
+ name: master-builds
+ on:
+ branch: master
diff --git a/bin/docker-compose b/bin/docker-compose
new file mode 100755
index 00000000..aeb53870
--- /dev/null
+++ b/bin/docker-compose
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose.cli.main import main
+main()
diff --git a/compose/__init__.py b/compose/__init__.py
new file mode 100644
index 00000000..20392ec9
--- /dev/null
+++ b/compose/__init__.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+__version__ = '1.17.1'
diff --git a/compose/__main__.py b/compose/__main__.py
new file mode 100644
index 00000000..27a7acbb
--- /dev/null
+++ b/compose/__main__.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose.cli.main import main
+
+main()
diff --git a/compose/bundle.py b/compose/bundle.py
new file mode 100644
index 00000000..937a3708
--- /dev/null
+++ b/compose/bundle.py
@@ -0,0 +1,258 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import logging
+
+import six
+from docker.utils import split_command
+from docker.utils.ports import split_port
+
+from .cli.errors import UserError
+from .config.serialize import denormalize_config
+from .network import get_network_defs_for_service
+from .service import format_environment
+from .service import NoSuchImageError
+from .service import parse_repository_tag
+
+
+log = logging.getLogger(__name__)
+
+
+SERVICE_KEYS = {
+ 'working_dir': 'WorkingDir',
+ 'user': 'User',
+ 'labels': 'Labels',
+}
+
+IGNORED_KEYS = {'build'}
+
+SUPPORTED_KEYS = {
+ 'image',
+ 'ports',
+ 'expose',
+ 'networks',
+ 'command',
+ 'environment',
+ 'entrypoint',
+} | set(SERVICE_KEYS)
+
+VERSION = '0.1'
+
+
+class NeedsPush(Exception):
+ def __init__(self, image_name):
+ self.image_name = image_name
+
+
+class NeedsPull(Exception):
+ def __init__(self, image_name, service_name):
+ self.image_name = image_name
+ self.service_name = service_name
+
+
+class MissingDigests(Exception):
+ def __init__(self, needs_push, needs_pull):
+ self.needs_push = needs_push
+ self.needs_pull = needs_pull
+
+
+def serialize_bundle(config, image_digests):
+ return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
+
+
+def get_image_digests(project, allow_push=False):
+ digests = {}
+ needs_push = set()
+ needs_pull = set()
+
+ for service in project.services:
+ try:
+ digests[service.name] = get_image_digest(
+ service,
+ allow_push=allow_push,
+ )
+ except NeedsPush as e:
+ needs_push.add(e.image_name)
+ except NeedsPull as e:
+ needs_pull.add(e.service_name)
+
+ if needs_push or needs_pull:
+ raise MissingDigests(needs_push, needs_pull)
+
+ return digests
+
+
+def get_image_digest(service, allow_push=False):
+ if 'image' not in service.options:
+ raise UserError(
+ "Service '{s.name}' doesn't define an image tag. An image name is "
+ "required to generate a proper image digest for the bundle. Specify "
+ "an image repo and tag with the 'image' option.".format(s=service))
+
+ _, _, separator = parse_repository_tag(service.options['image'])
+ # Compose file already uses a digest, no lookup required
+ if separator == '@':
+ return service.options['image']
+
+ try:
+ image = service.image()
+ except NoSuchImageError:
+ action = 'build' if 'build' in service.options else 'pull'
+ raise UserError(
+ "Image not found for service '{service}'. "
+ "You might need to run `docker-compose {action} {service}`."
+ .format(service=service.name, action=action))
+
+ if image['RepoDigests']:
+ # TODO: pick a digest based on the image tag if there are multiple
+ # digests
+ return image['RepoDigests'][0]
+
+ if 'build' not in service.options:
+ raise NeedsPull(service.image_name, service.name)
+
+ if not allow_push:
+ raise NeedsPush(service.image_name)
+
+ return push_image(service)
+
+
+def push_image(service):
+ try:
+ digest = service.push()
+ except Exception:
+ log.error(
+ "Failed to push image for service '{s.name}'. Please use an "
+ "image tag that can be pushed to a Docker "
+ "registry.".format(s=service))
+ raise
+
+ if not digest:
+ raise ValueError("Failed to get digest for %s" % service.name)
+
+ repo, _, _ = parse_repository_tag(service.options['image'])
+ identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
+
+ # only do this if RepoDigests isn't already populated
+ image = service.image()
+ if not image['RepoDigests']:
+ # Pull by digest so that image['RepoDigests'] is populated for next time
+ # and we don't have to pull/push again
+ service.client.pull(identifier)
+ log.info("Stored digest for {}".format(service.image_name))
+
+ return identifier
+
+
+def to_bundle(config, image_digests):
+ if config.networks:
+ log.warn("Unsupported top level key 'networks' - ignoring")
+
+ if config.volumes:
+ log.warn("Unsupported top level key 'volumes' - ignoring")
+
+ config = denormalize_config(config)
+
+ return {
+ 'Version': VERSION,
+ 'Services': {
+ name: convert_service_to_bundle(
+ name,
+ service_dict,
+ image_digests[name],
+ )
+ for name, service_dict in config['services'].items()
+ },
+ }
+
+
+def convert_service_to_bundle(name, service_dict, image_digest):
+ container_config = {'Image': image_digest}
+
+ for key, value in service_dict.items():
+ if key in IGNORED_KEYS:
+ continue
+
+ if key not in SUPPORTED_KEYS:
+ log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
+ continue
+
+ if key == 'environment':
+ container_config['Env'] = format_environment({
+ envkey: envvalue for envkey, envvalue in value.items()
+ if envvalue
+ })
+ continue
+
+ if key in SERVICE_KEYS:
+ container_config[SERVICE_KEYS[key]] = value
+ continue
+
+ set_command_and_args(
+ container_config,
+ service_dict.get('entrypoint', []),
+ service_dict.get('command', []))
+ container_config['Networks'] = make_service_networks(name, service_dict)
+
+ ports = make_port_specs(service_dict)
+ if ports:
+ container_config['Ports'] = ports
+
+ return container_config
+
+
+# See https://github.com/docker/swarmkit/blob/agent/exec/container/container.go#L95
+def set_command_and_args(config, entrypoint, command):
+ if isinstance(entrypoint, six.string_types):
+ entrypoint = split_command(entrypoint)
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+
+ if entrypoint:
+ config['Command'] = entrypoint + command
+ return
+
+ if command:
+ config['Args'] = command
+
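+# For illustration (values hypothetical): an entrypoint wins and absorbs the
+# command, mirroring the swarmkit semantics linked above:
+#   entrypoint='python app.py', command='--debug'
+#     -> config['Command'] == ['python', 'app.py', '--debug']
+#   entrypoint=[],              command='run worker'
+#     -> config['Args'] == ['run', 'worker']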
+
+def make_service_networks(name, service_dict):
+ networks = []
+
+ for network_name, network_def in get_network_defs_for_service(service_dict).items():
+ for key in network_def.keys():
+ log.warn(
+ "Unsupported key '{}' in services.{}.networks.{} - ignoring"
+ .format(key, name, network_name))
+
+ networks.append(network_name)
+
+ return networks
+
+
+def make_port_specs(service_dict):
+ ports = []
+
+ internal_ports = [
+ internal_port
+ for port_def in service_dict.get('ports', [])
+ for internal_port in split_port(port_def)[0]
+ ]
+
+ internal_ports += service_dict.get('expose', [])
+
+ for internal_port in internal_ports:
+ spec = make_port_spec(internal_port)
+ if spec not in ports:
+ ports.append(spec)
+
+ return ports
+
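+# For illustration (roughly; values hypothetical): ports: ['8080:80', '53/udp']
+# yields internal ports ['80', '53/udp'], which make_port_spec below turns into
+#   {'Protocol': 'tcp', 'Port': 80} and {'Protocol': 'udp', 'Port': 53}.
+# Host-side mappings are dropped: a bundle records only the container port.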
+
+def make_port_spec(value):
+ components = six.text_type(value).partition('/')
+ return {
+ 'Protocol': components[2] or 'tcp',
+ 'Port': int(components[0]),
+ }
diff --git a/compose/cli/__init__.py b/compose/cli/__init__.py
new file mode 100644
index 00000000..2574a311
--- /dev/null
+++ b/compose/cli/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import subprocess
+import sys
+
+# Attempt to detect https://github.com/docker/compose/issues/4344
+try:
+ # We don't try importing pip because it messes with package imports
+ # on some Linux distros (Ubuntu, Fedora)
+ # https://github.com/docker/compose/issues/4425
+ # https://github.com/docker/compose/issues/4481
+ # https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
+ env = os.environ.copy()
+ env[str('PIP_DISABLE_PIP_VERSION_CHECK')] = str('1')
+
+ s_cmd = subprocess.Popen(
+ # DO NOT replace this call with a `sys.executable` call. It breaks the binary
+ # distribution (with the binary calling itself recursively over and over).
+ ['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+ env=env
+ )
+ packages = s_cmd.communicate()[0].splitlines()
+ dockerpy_installed = len(
+ list(filter(lambda p: p.startswith(b'docker-py=='), packages))
+ ) > 0
+ if dockerpy_installed:
+ from .colors import yellow
+ print(
+ yellow('WARNING:'),
+ "Dependency conflict: an older version of the 'docker-py' package "
+ "may be polluting the namespace. "
+ "If you're experiencing crashes, run the following command to remedy the issue:\n"
+ "pip uninstall docker-py; pip uninstall docker; pip install docker",
+ file=sys.stderr
+ )
+
+except OSError:
+ # The pip command is not available, which indicates this is probably the
+ # binary distribution of Compose, which is not affected.
+ pass
+except UnicodeDecodeError:
+ # ref: https://github.com/docker/compose/issues/4663
+ # This could be caused by a number of things, but it seems to be a
+ # python 2 + MacOS interaction. It's not ideal to ignore this, but at least
+ # it doesn't make the program unusable.
+ pass
diff --git a/compose/cli/colors.py b/compose/cli/colors.py
new file mode 100644
index 00000000..cb30e361
--- /dev/null
+++ b/compose/cli/colors.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from ..const import IS_WINDOWS_PLATFORM
+
+NAMES = [
+ 'grey',
+ 'red',
+ 'green',
+ 'yellow',
+ 'blue',
+ 'magenta',
+ 'cyan',
+ 'white'
+]
+
+
+def get_pairs():
+ for i, name in enumerate(NAMES):
+ yield (name, str(30 + i))
+ yield ('intense_' + name, str(30 + i) + ';1')
+
+
+def ansi(code):
+ return '\033[{0}m'.format(code)
+
+
+def ansi_color(code, s):
+ return '{0}{1}{2}'.format(ansi(code), s, ansi(0))
+
+
+def make_color_fn(code):
+ return lambda s: ansi_color(code, s)
+
+
+if IS_WINDOWS_PLATFORM:
+ import colorama
+ colorama.init(strip=False)
+for (name, code) in get_pairs():
+ globals()[name] = make_color_fn(code)
+
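+# For illustration: the loop above defines one module-level helper per name,
+# each wrapping its argument in the matching ANSI SGR code plus a reset:
+#   red('boom')        -> '\033[31mboom\033[0m'
+#   intense_cyan('hi') -> '\033[36;1mhi\033[0m'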
+
+def rainbow():
+ cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
+ 'intense_cyan', 'intense_yellow', 'intense_green',
+ 'intense_magenta', 'intense_red', 'intense_blue']
+
+ for c in cs:
+ yield globals()[c]
diff --git a/compose/cli/command.py b/compose/cli/command.py
new file mode 100644
index 00000000..e1ae690c
--- /dev/null
+++ b/compose/cli/command.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+import re
+
+import six
+
+from . import errors
+from . import verbose_proxy
+from .. import config
+from ..config.environment import Environment
+from ..const import API_VERSIONS
+from ..project import Project
+from .docker_client import docker_client
+from .docker_client import get_tls_version
+from .docker_client import tls_config_from_options
+from .utils import get_version_info
+
+log = logging.getLogger(__name__)
+
+
+def project_from_options(project_dir, options):
+ environment = Environment.from_env_file(project_dir)
+ host = options.get('--host')
+ if host is not None:
+ host = host.lstrip('=')
+ return get_project(
+ project_dir,
+ get_config_path_from_options(project_dir, options, environment),
+ project_name=options.get('--project-name'),
+ verbose=options.get('--verbose'),
+ host=host,
+ tls_config=tls_config_from_options(options),
+ environment=environment,
+ override_dir=options.get('--project-directory'),
+ )
+
+
+def get_config_from_options(base_dir, options):
+ environment = Environment.from_env_file(base_dir)
+ config_path = get_config_path_from_options(
+ base_dir, options, environment
+ )
+ return config.load(
+ config.find(base_dir, config_path, environment)
+ )
+
+
+def get_config_path_from_options(base_dir, options, environment):
+ def unicode_paths(paths):
+ return [p.decode('utf-8') if isinstance(p, six.binary_type) else p for p in paths]
+
+ file_option = options.get('--file')
+ if file_option:
+ return unicode_paths(file_option)
+
+ config_files = environment.get('COMPOSE_FILE')
+ if config_files:
+ pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
+ return unicode_paths(config_files.split(pathsep))
+ return None
+
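+# For illustration (paths hypothetical): with
+#   COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml
+# and the default POSIX separator, this returns
+#   ['docker-compose.yml', 'docker-compose.prod.yml'];
+# an explicit --file option always takes precedence over the environment.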
+
+def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
+ tls_version=None):
+
+ client = docker_client(
+ version=version, tls_config=tls_config, host=host,
+ environment=environment, tls_version=get_tls_version(environment)
+ )
+ if verbose:
+ version_info = six.iteritems(client.version())
+ log.info(get_version_info('full'))
+ log.info("Docker base_url: %s", client.base_url)
+ log.info("Docker version: %s",
+ ", ".join("%s=%s" % item for item in version_info))
+ return verbose_proxy.VerboseProxy('docker', client)
+ return client
+
+
+def get_project(project_dir, config_path=None, project_name=None, verbose=False,
+ host=None, tls_config=None, environment=None, override_dir=None):
+ if not environment:
+ environment = Environment.from_env_file(project_dir)
+ config_details = config.find(project_dir, config_path, environment, override_dir)
+ project_name = get_project_name(
+ config_details.working_dir, project_name, environment
+ )
+ config_data = config.load(config_details)
+
+ api_version = environment.get(
+ 'COMPOSE_API_VERSION',
+ API_VERSIONS[config_data.version])
+
+ client = get_client(
+ verbose=verbose, version=api_version, tls_config=tls_config,
+ host=host, environment=environment
+ )
+
+ with errors.handle_connection_errors(client):
+ return Project.from_config(project_name, config_data, client)
+
+
+def get_project_name(working_dir, project_name=None, environment=None):
+ def normalize_name(name):
+ return re.sub(r'[^a-z0-9]', '', name.lower())
+
+ if not environment:
+ environment = Environment.from_env_file(working_dir)
+ project_name = project_name or environment.get('COMPOSE_PROJECT_NAME')
+ if project_name:
+ return normalize_name(project_name)
+
+ project = os.path.basename(os.path.abspath(working_dir))
+ if project:
+ return normalize_name(project)
+
+ return 'default'
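+
+
+# For illustration (path hypothetical): a working directory of
+# /home/user/My_App yields the project name 'myapp' (lowercased, characters
+# outside [a-z0-9] stripped); COMPOSE_PROJECT_NAME and --project-name take
+# precedence over the directory name.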
diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py
new file mode 100644
index 00000000..44c7ad91
--- /dev/null
+++ b/compose/cli/docker_client.py
@@ -0,0 +1,95 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import ssl
+
+from docker import APIClient
+from docker.errors import TLSParameterError
+from docker.tls import TLSConfig
+from docker.utils import kwargs_from_env
+
+from ..const import HTTP_TIMEOUT
+from .errors import UserError
+from .utils import generate_user_agent
+from .utils import unquote_path
+
+log = logging.getLogger(__name__)
+
+
+def get_tls_version(environment):
+ compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
+ if not compose_tls_version:
+ return None
+
+ tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
+ if not hasattr(ssl, tls_attr_name):
+ log.warn(
+ 'The "{}" protocol is unavailable. You may need to update your '
+ 'version of Python or OpenSSL. Falling back to TLSv1 (default).'
+ .format(compose_tls_version)
+ )
+ return None
+
+ return getattr(ssl, tls_attr_name)
+
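+# For illustration: COMPOSE_TLS_VERSION=TLSv1_2 resolves to
+# ssl.PROTOCOL_TLSv1_2 (where this Python build provides it); an unrecognized
+# value such as TLSv9 logs the warning above and falls back by returning None.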
+
+def tls_config_from_options(options, environment=None):
+ tls = options.get('--tls', False)
+ ca_cert = unquote_path(options.get('--tlscacert'))
+ cert = unquote_path(options.get('--tlscert'))
+ key = unquote_path(options.get('--tlskey'))
+ verify = options.get('--tlsverify')
+ skip_hostname_check = options.get('--skip-hostname-check', False)
+
+ tls_version = get_tls_version(environment or {})
+
+ advanced_opts = any([ca_cert, cert, key, verify, tls_version])
+
+ if tls is True and not advanced_opts:
+ return True
+ elif advanced_opts: # --tls is a noop
+ client_cert = None
+ if cert or key:
+ client_cert = (cert, key)
+
+ return TLSConfig(
+ client_cert=client_cert, verify=verify, ca_cert=ca_cert,
+ assert_hostname=False if skip_hostname_check else None,
+ ssl_version=tls_version
+ )
+
+ return None
+
+
+def docker_client(environment, version=None, tls_config=None, host=None,
+ tls_version=None):
+ """
+ Returns a docker-py client configured using environment variables
+ according to the same logic as the official Docker client.
+ """
+ try:
+ kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
+ except TLSParameterError:
+ raise UserError(
+ "TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
+ "and DOCKER_CERT_PATH are set correctly.\n"
+ "You might need to run `eval \"$(docker-machine env default)\"`")
+
+ if host:
+ kwargs['base_url'] = host
+ if tls_config:
+ kwargs['tls'] = tls_config
+
+ if version:
+ kwargs['version'] = version
+
+ timeout = environment.get('COMPOSE_HTTP_TIMEOUT')
+ if timeout:
+ kwargs['timeout'] = int(timeout)
+ else:
+ kwargs['timeout'] = HTTP_TIMEOUT
+
+ kwargs['user_agent'] = generate_user_agent()
+
+ return APIClient(**kwargs)
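+
+
+# A rough usage sketch (a reachable local daemon is assumed):
+#
+#   from compose.config.environment import Environment
+#   env = Environment.from_env_file('.')
+#   client = docker_client(env, version='1.25')
+#   client.ping()
+#
+# DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH are honoured via
+# kwargs_from_env, mirroring the official docker CLI.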
diff --git a/compose/cli/docopt_command.py b/compose/cli/docopt_command.py
new file mode 100644
index 00000000..809a4b74
--- /dev/null
+++ b/compose/cli/docopt_command.py
@@ -0,0 +1,59 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from inspect import getdoc
+
+from docopt import docopt
+from docopt import DocoptExit
+
+
+def docopt_full_help(docstring, *args, **kwargs):
+ try:
+ return docopt(docstring, *args, **kwargs)
+ except DocoptExit:
+ raise SystemExit(docstring)
+
+
+class DocoptDispatcher(object):
+
+ def __init__(self, command_class, options):
+ self.command_class = command_class
+ self.options = options
+
+ def parse(self, argv):
+ command_help = getdoc(self.command_class)
+ options = docopt_full_help(command_help, argv, **self.options)
+ command = options['COMMAND']
+
+ if command is None:
+ raise SystemExit(command_help)
+
+ handler = get_handler(self.command_class, command)
+ docstring = getdoc(handler)
+
+ if docstring is None:
+ raise NoSuchCommand(command, self)
+
+ command_options = docopt_full_help(docstring, options['ARGS'], options_first=True)
+ return options, handler, command_options
+
+
+def get_handler(command_class, command):
+ command = command.replace('-', '_')
+ # we certainly want an "exec" command, since that's what the docker client
+ # has, but in Python "exec" is a keyword
+ if command == "exec":
+ command = "exec_command"
+
+ if not hasattr(command_class, command):
+ raise NoSuchCommand(command, command_class)
+
+ return getattr(command_class, command)
+
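+# For illustration: get_handler(TopLevelCommand, 'up') returns
+# TopLevelCommand.up, while get_handler(TopLevelCommand, 'exec') returns
+# TopLevelCommand.exec_command; hyphenated command names map to underscores.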
+
+class NoSuchCommand(Exception):
+ def __init__(self, command, supercommand):
+ super(NoSuchCommand, self).__init__("No such command: %s" % command)
+
+ self.command = command
+ self.supercommand = supercommand
diff --git a/compose/cli/errors.py b/compose/cli/errors.py
new file mode 100644
index 00000000..1506aa66
--- /dev/null
+++ b/compose/cli/errors.py
@@ -0,0 +1,162 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import contextlib
+import logging
+import socket
+from distutils.spawn import find_executable
+from textwrap import dedent
+
+import six
+from docker.errors import APIError
+from requests.exceptions import ConnectionError as RequestsConnectionError
+from requests.exceptions import ReadTimeout
+from requests.exceptions import SSLError
+from requests.packages.urllib3.exceptions import ReadTimeoutError
+
+from ..const import API_VERSION_TO_ENGINE_VERSION
+from .utils import is_docker_for_mac_installed
+from .utils import is_mac
+from .utils import is_ubuntu
+from .utils import is_windows
+
+
+log = logging.getLogger(__name__)
+
+
+class UserError(Exception):
+
+ def __init__(self, msg):
+ self.msg = dedent(msg).strip()
+
+ def __unicode__(self):
+ return self.msg
+
+ __str__ = __unicode__
+
+
+class ConnectionError(Exception):
+ pass
+
+
+@contextlib.contextmanager
+def handle_connection_errors(client):
+ try:
+ yield
+ except SSLError as e:
+ log.error('SSL error: %s' % e)
+ raise ConnectionError()
+ except RequestsConnectionError as e:
+ if e.args and isinstance(e.args[0], ReadTimeoutError):
+ log_timeout_error(client.timeout)
+ raise ConnectionError()
+ exit_with_error(get_conn_error_message(client.base_url))
+ except APIError as e:
+ log_api_error(e, client.api_version)
+ raise ConnectionError()
+ except (ReadTimeout, socket.timeout) as e:
+ log_timeout_error(client.timeout)
+ raise ConnectionError()
+ except Exception as e:
+ if is_windows():
+ import pywintypes
+ if isinstance(e, pywintypes.error):
+ log_windows_pipe_error(e)
+ raise ConnectionError()
+ raise
+
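+# Usage sketch: wrap any block of docker-py calls so connection failures are
+# logged once and surfaced as ConnectionError, e.g.
+#
+#   with handle_connection_errors(project.client):
+#       project.up()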
+
+def log_windows_pipe_error(exc):
+ if exc.winerror == 232: # https://github.com/docker/compose/issues/5005
+ log.error(
+ "The current Compose file version is not compatible with your engine version. "
+ "Please upgrade your Compose file to a more recent version, or set "
+ "a COMPOSE_API_VERSION in your environment."
+ )
+ else:
+ log.error(
+ "Windows named pipe error: {} (code: {})".format(exc.strerror, exc.winerror)
+ )
+
+
+def log_timeout_error(timeout):
+ log.error(
+ "An HTTP request took too long to complete. Retry with --verbose to "
+ "obtain debug information.\n"
+ "If you encounter this issue regularly because of slow network "
+ "conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
+ "value (current value: %s)." % timeout)
+
+
+def log_api_error(e, client_version):
+ explanation = e.explanation
+ if isinstance(explanation, six.binary_type):
+ explanation = explanation.decode('utf-8')
+
+ if 'client is newer than server' not in explanation:
+ log.error(explanation)
+ return
+
+ version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
+ if not version:
+ # They've set a custom API version
+ log.error(explanation)
+ return
+
+ log.error(
+ "The Docker Engine version is less than the minimum required by "
+ "Compose. Your current project requires a Docker Engine of "
+ "version {version} or greater.".format(version=version))
+
+
+def exit_with_error(msg):
+ log.error(dedent(msg).strip())
+ raise ConnectionError()
+
+
+def get_conn_error_message(url):
+ if find_executable('docker') is None:
+ return docker_not_found_msg("Couldn't connect to Docker daemon.")
+ if is_docker_for_mac_installed():
+ return conn_error_docker_for_mac
+ if find_executable('docker-machine') is not None:
+ return conn_error_docker_machine
+ return conn_error_generic.format(url=url)
+
+
+def docker_not_found_msg(problem):
+ return "{} You might need to install Docker:\n\n{}".format(
+ problem, docker_install_url())
+
+
+def docker_install_url():
+ if is_mac():
+ return docker_install_url_mac
+ elif is_ubuntu():
+ return docker_install_url_ubuntu
+ elif is_windows():
+ return docker_install_url_windows
+ else:
+ return docker_install_url_generic
+
+
+docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/"
+docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/"
+docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/"
+docker_install_url_generic = "https://docs.docker.com/engine/installation/"
+
+
+conn_error_docker_machine = """
+ Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
+"""
+
+conn_error_docker_for_mac = """
+ Couldn't connect to Docker daemon. You might need to start Docker for Mac.
+"""
+
+
+conn_error_generic = """
+ Couldn't connect to Docker daemon at {url} - is it running?
+
+ If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
+"""
diff --git a/compose/cli/formatter.py b/compose/cli/formatter.py
new file mode 100644
index 00000000..6c0a3695
--- /dev/null
+++ b/compose/cli/formatter.py
@@ -0,0 +1,51 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+
+import six
+import texttable
+
+from compose.cli import colors
+
+
+def get_tty_width():
+ tty_size = os.popen('stty size 2> /dev/null', 'r').read().split()
+ if len(tty_size) != 2:
+ return 0
+ _, width = tty_size
+ return int(width)
+
+
+class Formatter(object):
+ """Format tabular data for printing."""
+ def table(self, headers, rows):
+ table = texttable.Texttable(max_width=get_tty_width())
+ table.set_cols_dtype(['t' for h in headers])
+ table.add_rows([headers] + rows)
+ table.set_deco(table.HEADER)
+ table.set_chars(['-', '|', '+', '-'])
+
+ return table.draw()
+
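+    # For illustration (rows hypothetical):
+    #   Formatter().table(['Name', 'State'], [['web_1', 'Up']])
+    # returns a texttable rendering, roughly:
+    #   Name    State
+    #   --------------
+    #   web_1   Up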
+
+class ConsoleWarningFormatter(logging.Formatter):
+ """A logging.Formatter which prints WARNING and ERROR messages with
+ a prefix of the log level colored appropriate for the log level.
+ """
+
+ def get_level_message(self, record):
+ separator = ': '
+ if record.levelno == logging.WARNING:
+ return colors.yellow(record.levelname) + separator
+ if record.levelno == logging.ERROR:
+ return colors.red(record.levelname) + separator
+
+ return ''
+
+ def format(self, record):
+ if isinstance(record.msg, six.binary_type):
+ record.msg = record.msg.decode('utf-8')
+ message = super(ConsoleWarningFormatter, self).format(record)
+ return '{0}{1}'.format(self.get_level_message(record), message)
diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py
new file mode 100644
index 00000000..60bba8da
--- /dev/null
+++ b/compose/cli/log_printer.py
@@ -0,0 +1,250 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import sys
+from collections import namedtuple
+from itertools import cycle
+from threading import Thread
+
+from docker.errors import APIError
+from six.moves import _thread as thread
+from six.moves.queue import Empty
+from six.moves.queue import Queue
+
+from . import colors
+from compose import utils
+from compose.cli.signals import ShutdownException
+from compose.utils import split_buffer
+
+
+class LogPresenter(object):
+
+ def __init__(self, prefix_width, color_func):
+ self.prefix_width = prefix_width
+ self.color_func = color_func
+
+ def present(self, container, line):
+ prefix = container.name_without_project.ljust(self.prefix_width)
+ return '{prefix} {line}'.format(
+ prefix=self.color_func(prefix + ' |'),
+ line=line)
+
+
+def build_log_presenters(service_names, monochrome):
+ """Return an iterable of functions.
+
+ Each function can be used to format the log output of a container.
+ """
+ prefix_width = max_name_width(service_names)
+
+ def no_color(text):
+ return text
+
+ for color_func in cycle([no_color] if monochrome else colors.rainbow()):
+ yield LogPresenter(prefix_width, color_func)
+
+
+def max_name_width(service_names, max_index_width=3):
+ """Calculate the maximum width of container names so we can make the log
+ prefixes line up like so:
+
+ db_1 | Listening
+ web_1 | Listening
+ """
+ return max(len(name) for name in service_names) + max_index_width
+
+
+class LogPrinter(object):
+ """Print logs from many containers to a single output stream."""
+
+ def __init__(self,
+ containers,
+ presenters,
+ event_stream,
+ output=sys.stdout,
+ cascade_stop=False,
+ log_args=None):
+ self.containers = containers
+ self.presenters = presenters
+ self.event_stream = event_stream
+ self.output = utils.get_output_stream(output)
+ self.cascade_stop = cascade_stop
+ self.log_args = log_args or {}
+
+ def run(self):
+ if not self.containers:
+ return
+
+ queue = Queue()
+ thread_args = queue, self.log_args
+ thread_map = build_thread_map(self.containers, self.presenters, thread_args)
+ start_producer_thread((
+ thread_map,
+ self.event_stream,
+ self.presenters,
+ thread_args))
+
+ for line in consume_queue(queue, self.cascade_stop):
+ remove_stopped_threads(thread_map)
+
+ if self.cascade_stop:
+ # If the line is a container name, it is the stop sentinel for
+ # the container that triggered cascade_stop; return it so the
+ # caller can compute the correct exit code.
+ if any(cont.name == line for cont in self.containers):
+ return line
+
+ if not line:
+ if not thread_map:
+ # There are no running containers left to tail, so exit
+ return
+ # We got an empty line because of a timeout, but there are still
+ # active containers to tail, so continue
+ continue
+
+ self.write(line)
+
+ def write(self, line):
+ try:
+ self.output.write(line)
+ except UnicodeEncodeError:
+ # This may happen if the user's locale settings don't support UTF-8
+ # and UTF-8 characters are present in the log line. The following
+ # will output a "degraded" log with unsupported characters
+ # replaced by `?`
+ self.output.write(line.encode('ascii', 'replace').decode())
+ self.output.flush()
+
+
+def remove_stopped_threads(thread_map):
+ for container_id, tailer_thread in list(thread_map.items()):
+ if not tailer_thread.is_alive():
+ thread_map.pop(container_id, None)
+
+
+def build_thread(container, presenter, queue, log_args):
+ tailer = Thread(
+ target=tail_container_logs,
+ args=(container, presenter, queue, log_args))
+ tailer.daemon = True
+ tailer.start()
+ return tailer
+
+
+def build_thread_map(initial_containers, presenters, thread_args):
+ return {
+ container.id: build_thread(container, next(presenters), *thread_args)
+ for container in initial_containers
+ }
+
+
+class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')):
+
+ @classmethod
+ def new(cls, item):
+ return cls(item, None, None)
+
+ @classmethod
+ def exception(cls, exc):
+ return cls(None, None, exc)
+
+ @classmethod
+ def stop(cls, item=None):
+ return cls(item, True, None)
+
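+# For illustration: each tailer thread feeds the shared queue with
+#   QueueItem.new(line)       - a formatted log line to write out
+#   QueueItem.exception(exc)  - re-raised by the consumer
+#   QueueItem.stop(name)      - the container exited (drives cascade_stop)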
+
+def tail_container_logs(container, presenter, queue, log_args):
+ generator = get_log_generator(container)
+
+ try:
+ for item in generator(container, log_args):
+ queue.put(QueueItem.new(presenter.present(container, item)))
+ except Exception as e:
+ queue.put(QueueItem.exception(e))
+ return
+ if log_args.get('follow'):
+ queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container))))
+ queue.put(QueueItem.stop(container.name))
+
+
+def get_log_generator(container):
+ if container.has_api_logs:
+ return build_log_generator
+ return build_no_log_generator
+
+
+def build_no_log_generator(container, log_args):
+ """Return a generator that prints a warning about logs and waits for
+ container to exit.
+ """
+ yield "WARNING: no logs are available with the '{}' log driver\n".format(
+ container.log_driver)
+
+
+def build_log_generator(container, log_args):
+ # If the container doesn't have a log_stream, we need to attach to the
+ # container before the log printer starts running.
+ if container.log_stream is None:
+ stream = container.logs(stdout=True, stderr=True, stream=True, **log_args)
+ else:
+ stream = container.log_stream
+
+ return split_buffer(stream)
+
+
+def wait_on_exit(container):
+ try:
+ exit_code = container.wait()
+ return "%s exited with code %s\n" % (container.name, exit_code)
+ except APIError as e:
+ return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
+ container.name, e.response.status_code,
+ e.response.text or '[empty]'
+ )
+
+
+def start_producer_thread(thread_args):
+ producer = Thread(target=watch_events, args=thread_args)
+ producer.daemon = True
+ producer.start()
+
+
+def watch_events(thread_map, event_stream, presenters, thread_args):
+ for event in event_stream:
+ if event['action'] == 'stop':
+ thread_map.pop(event['id'], None)
+
+ if event['action'] != 'start':
+ continue
+
+ if event['id'] in thread_map:
+ if thread_map[event['id']].is_alive():
+ continue
+ # Container was stopped and started, we need a new thread
+ thread_map.pop(event['id'], None)
+
+ thread_map[event['id']] = build_thread(
+ event['container'],
+ next(presenters),
+ *thread_args)
+
+
+def consume_queue(queue, cascade_stop):
+ """Consume the queue by reading lines off of it and yielding them."""
+ while True:
+ try:
+ item = queue.get(timeout=0.1)
+ except Empty:
+ yield None
+ continue
+ # See https://github.com/docker/compose/issues/189
+ except thread.error:
+ raise ShutdownException()
+
+ if item.exc:
+ raise item.exc
+
+ if item.is_stop and not cascade_stop:
+ continue
+
+ yield item.item
diff --git a/compose/cli/main.py b/compose/cli/main.py
new file mode 100644
index 00000000..face38e6
--- /dev/null
+++ b/compose/cli/main.py
@@ -0,0 +1,1297 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import contextlib
+import functools
+import json
+import logging
+import pipes
+import re
+import subprocess
+import sys
+from distutils.spawn import find_executable
+from inspect import getdoc
+from operator import attrgetter
+
+from . import errors
+from . import signals
+from .. import __version__
+from ..bundle import get_image_digests
+from ..bundle import MissingDigests
+from ..bundle import serialize_bundle
+from ..config import ConfigurationError
+from ..config import parse_environment
+from ..config import resolve_build_args
+from ..config.environment import Environment
+from ..config.serialize import serialize_config
+from ..config.types import VolumeSpec
+from ..const import COMPOSEFILE_V2_2 as V2_2
+from ..const import IS_WINDOWS_PLATFORM
+from ..errors import StreamParseError
+from ..progress_stream import StreamOutputError
+from ..project import NoSuchService
+from ..project import OneOffFilter
+from ..project import ProjectError
+from ..service import BuildAction
+from ..service import BuildError
+from ..service import ConvergenceStrategy
+from ..service import ImageType
+from ..service import NeedsBuildError
+from ..service import OperationFailedError
+from .command import get_config_from_options
+from .command import project_from_options
+from .docopt_command import DocoptDispatcher
+from .docopt_command import get_handler
+from .docopt_command import NoSuchCommand
+from .errors import UserError
+from .formatter import ConsoleWarningFormatter
+from .formatter import Formatter
+from .log_printer import build_log_presenters
+from .log_printer import LogPrinter
+from .utils import get_version_info
+from .utils import human_readable_file_size
+from .utils import yesno
+
+
+if not IS_WINDOWS_PLATFORM:
+ from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation
+
+log = logging.getLogger(__name__)
+console_handler = logging.StreamHandler(sys.stderr)
+
+
+def main():
+ signals.ignore_sigpipe()
+ try:
+ command = dispatch()
+ command()
+ except (KeyboardInterrupt, signals.ShutdownException):
+ log.error("Aborting.")
+ sys.exit(1)
+ except (UserError, NoSuchService, ConfigurationError,
+ ProjectError, OperationFailedError) as e:
+ log.error(e.msg)
+ sys.exit(1)
+ except BuildError as e:
+ log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
+ sys.exit(1)
+ except StreamOutputError as e:
+ log.error(e)
+ sys.exit(1)
+ except NeedsBuildError as e:
+ log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
+ sys.exit(1)
+ except NoSuchCommand as e:
+ commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
+ log.error("No such command: %s\n\n%s", e.command, commands)
+ sys.exit(1)
+ except (errors.ConnectionError, StreamParseError):
+ sys.exit(1)
+
+
+def dispatch():
+ setup_logging()
+ dispatcher = DocoptDispatcher(
+ TopLevelCommand,
+ {'options_first': True, 'version': get_version_info('compose')})
+
+ options, handler, command_options = dispatcher.parse(sys.argv[1:])
+ setup_console_handler(console_handler, options.get('--verbose'), options.get('--no-ansi'))
+ setup_parallel_logger(options.get('--no-ansi'))
+ if options.get('--no-ansi'):
+ command_options['--no-color'] = True
+ return functools.partial(perform_command, options, handler, command_options)
+
+
+def perform_command(options, handler, command_options):
+ if options['COMMAND'] in ('help', 'version'):
+ # Skip looking up the compose file.
+ handler(command_options)
+ return
+
+ if options['COMMAND'] in ('config', 'bundle'):
+ command = TopLevelCommand(None)
+ handler(command, options, command_options)
+ return
+
+ project = project_from_options('.', options)
+ command = TopLevelCommand(project)
+ with errors.handle_connection_errors(project.client):
+ handler(command, command_options)
+
+
+def setup_logging():
+ root_logger = logging.getLogger()
+ root_logger.addHandler(console_handler)
+ root_logger.setLevel(logging.DEBUG)
+
+ # Disable requests logging
+ logging.getLogger("requests").propagate = False
+
+
+def setup_parallel_logger(noansi):
+ if noansi:
+ import compose.parallel
+ compose.parallel.ParallelStreamWriter.set_noansi()
+
+
+def setup_console_handler(handler, verbose, noansi=False):
+ if handler.stream.isatty() and noansi is False:
+ format_class = ConsoleWarningFormatter
+ else:
+ format_class = logging.Formatter
+
+ if verbose:
+ handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
+ handler.setLevel(logging.DEBUG)
+ else:
+ handler.setFormatter(format_class())
+ handler.setLevel(logging.INFO)
+
+
+# stolen from docopt master
+def parse_doc_section(name, source):
+ pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
+ re.IGNORECASE | re.MULTILINE)
+ return [s.strip() for s in pattern.findall(source)]
+
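+# For illustration: parse_doc_section('commands:', getdoc(TopLevelCommand))
+# extracts the indented "Commands:" block from the class docstring below,
+# which is how the error path for unknown commands lists the available ones.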
+
+class TopLevelCommand(object):
+ """Define and run multi-container applications with Docker.
+
+ Usage:
+ docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
+ docker-compose -h|--help
+
+ Options:
+ -f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
+ -p, --project-name NAME Specify an alternate project name (default: directory name)
+ --verbose Show more output
+ --no-ansi Do not print ANSI control characters
+ -v, --version Print version and exit
+ -H, --host HOST Daemon socket to connect to
+
+ --tls Use TLS; implied by --tlsverify
+ --tlscacert CA_PATH Trust certs signed only by this CA
+ --tlscert CLIENT_CERT_PATH Path to TLS certificate file
+ --tlskey TLS_KEY_PATH Path to TLS key file
+ --tlsverify Use TLS and verify the remote
+ --skip-hostname-check Don't check the daemon's hostname against the name specified
+ in the client certificate (for example if your docker host
+ is an IP address)
+ --project-directory PATH Specify an alternate working directory
+ (default: the path of the Compose file)
+
+ Commands:
+ build Build or rebuild services
+ bundle Generate a Docker bundle from the Compose file
+ config Validate and view the Compose file
+ create Create services
+ down Stop and remove containers, networks, images, and volumes
+ events Receive real time events from containers
+ exec Execute a command in a running container
+ help Get help on a command
+ images List images
+ kill Kill containers
+ logs View output from containers
+ pause Pause services
+ port Print the public port for a port binding
+ ps List containers
+ pull Pull service images
+ push Push service images
+ restart Restart services
+ rm Remove stopped containers
+ run Run a one-off command
+ scale Set number of containers for a service
+ start Start services
+ stop Stop services
+ top Display the running processes
+ unpause Unpause services
+ up Create and start containers
+ version Show the Docker-Compose version information
+ """
+
+ def __init__(self, project, project_dir='.'):
+ self.project = project
+ self.project_dir = project_dir
+
+ def build(self, options):
+ """
+ Build or rebuild services.
+
+ Services are built once and then tagged as `project_service`,
+ e.g. `composetest_db`. If you change a service's `Dockerfile` or the
+ contents of its build directory, you can run `docker-compose build` to rebuild it.
+
+ Usage: build [options] [--build-arg key=val...] [SERVICE...]
+
+ Options:
+ --force-rm Always remove intermediate containers.
+ --no-cache Do not use cache when building the image.
+ --pull Always attempt to pull a newer version of the image.
+ --build-arg key=val Set build-time variables for one service.
+ """
+ service_names = options['SERVICE']
+ build_args = options.get('--build-arg', None)
+ if build_args:
+ environment = Environment.from_env_file(self.project_dir)
+ build_args = resolve_build_args(build_args, environment)
+
+ if not service_names and build_args:
+ raise UserError("Need service name for --build-arg option")
+
+ self.project.build(
+ service_names=service_names,
+ no_cache=bool(options.get('--no-cache', False)),
+ pull=bool(options.get('--pull', False)),
+ force_rm=bool(options.get('--force-rm', False)),
+ build_args=build_args)
+
+ def bundle(self, config_options, options):
+ """
+ Generate a Distributed Application Bundle (DAB) from the Compose file.
+
+ Images must have digests stored, which requires interaction with a
+ Docker registry. If digests aren't stored for all images, you can fetch
+ them with `docker-compose pull` or `docker-compose push`. To push images
+ automatically when bundling, pass `--push-images`. Only services with
+ a `build` option specified will have their images pushed.
+
+ Usage: bundle [options]
+
+ Options:
+ --push-images Automatically push images for any services
+ which have a `build` option specified.
+
+ -o, --output PATH Path to write the bundle file to.
+ Defaults to "<project name>.dab".
+ """
+ self.project = project_from_options('.', config_options)
+ compose_config = get_config_from_options(self.project_dir, config_options)
+
+ output = options["--output"]
+ if not output:
+ output = "{}.dab".format(self.project.name)
+
+ image_digests = image_digests_for_project(self.project, options['--push-images'])
+
+ with open(output, 'w') as f:
+ f.write(serialize_bundle(compose_config, image_digests))
+
+ log.info("Wrote bundle to {}".format(output))
+
+ def config(self, config_options, options):
+ """
+ Validate and view the Compose file.
+
+ Usage: config [options]
+
+ Options:
+ --resolve-image-digests Pin image tags to digests.
+ -q, --quiet Only validate the configuration, don't print
+ anything.
+ --services Print the service names, one per line.
+ --volumes Print the volume names, one per line.
+
+ """
+
+ compose_config = get_config_from_options(self.project_dir, config_options)
+ image_digests = None
+
+ if options['--resolve-image-digests']:
+ self.project = project_from_options('.', config_options)
+ image_digests = image_digests_for_project(self.project)
+
+ if options['--quiet']:
+ return
+
+ if options['--services']:
+ print('\n'.join(service['name'] for service in compose_config.services))
+ return
+
+ if options['--volumes']:
+ print('\n'.join(volume for volume in compose_config.volumes))
+ return
+
+ print(serialize_config(compose_config, image_digests))
+
+ def create(self, options):
+ """
+ Creates containers for a service.
+ This command is deprecated. Use the `up` command with `--no-start` instead.
+
+ Usage: create [options] [SERVICE...]
+
+ Options:
+ --force-recreate Recreate containers even if their configuration and
+ image haven't changed. Incompatible with --no-recreate.
+ --no-recreate If containers already exist, don't recreate them.
+ Incompatible with --force-recreate.
+ --no-build Don't build an image, even if it's missing.
+ --build Build images before creating containers.
+ """
+ service_names = options['SERVICE']
+
+ log.warn(
+ 'The create command is deprecated. '
+ 'Use the up command with the --no-start flag instead.'
+ )
+
+ self.project.create(
+ service_names=service_names,
+ strategy=convergence_strategy_from_opts(options),
+ do_build=build_action_from_opts(options),
+ )
+
+ def down(self, options):
+ """
+ Stops containers and removes containers, networks, volumes, and images
+ created by `up`.
+
+ By default, the only things removed are:
+
+ - Containers for services defined in the Compose file
+ - Networks defined in the `networks` section of the Compose file
+ - The default network, if one is used
+
+ Networks and volumes defined as `external` are never removed.
+
+ Usage: down [options]
+
+ Options:
+ --rmi type Remove images. Type must be one of:
+ 'all': Remove all images used by any service.
+ 'local': Remove only images that don't have a custom tag
+ set by the `image` field.
+ -v, --volumes Remove named volumes declared in the `volumes` section
+ of the Compose file and anonymous volumes
+ attached to containers.
+ --remove-orphans Remove containers for services not defined in the
+ Compose file
+ """
+ image_type = image_type_from_opt('--rmi', options['--rmi'])
+ self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
+
+ def events(self, options):
+ """
+ Receive real time events from containers.
+
+ Usage: events [options] [SERVICE...]
+
+ Options:
+ --json Output events as a stream of json objects
+ """
+ def format_event(event):
+ attributes = ["%s=%s" % item for item in event['attributes'].items()]
+ return ("{time} {type} {action} {id} ({attrs})").format(
+ attrs=", ".join(sorted(attributes)),
+ **event)
+
+ def json_format_event(event):
+ event['time'] = event['time'].isoformat()
+ event.pop('container')
+ return json.dumps(event)
+
+ for event in self.project.events():
+ formatter = json_format_event if options['--json'] else format_event
+ print(formatter(event))
+ sys.stdout.flush()
+
+ def exec_command(self, options):
+ """
+ Execute a command in a running container
+
+ Usage: exec [options] SERVICE COMMAND [ARGS...]
+
+ Options:
+ -d Detached mode: Run command in the background.
+ --privileged Give extended privileges to the process.
+ -u, --user USER Run the command as this user.
+ -T Disable pseudo-tty allocation. By default `docker-compose exec`
+ allocates a TTY.
+ --index=index index of the container if there are multiple
+ instances of a service [default: 1]
+ """
+ index = int(options.get('--index'))
+ service = self.project.get_service(options['SERVICE'])
+ detach = options['-d']
+
+ try:
+ container = service.get_container(number=index)
+ except ValueError as e:
+ raise UserError(str(e))
+ command = [options['COMMAND']] + options['ARGS']
+ tty = not options["-T"]
+
+ if IS_WINDOWS_PLATFORM and not detach:
+ args = ["exec"]
+
+ if options["-d"]:
+ args += ["--detach"]
+ else:
+ args += ["--interactive"]
+
+ if not options["-T"]:
+ args += ["--tty"]
+
+ if options["--privileged"]:
+ args += ["--privileged"]
+
+ if options["--user"]:
+ args += ["--user", options["--user"]]
+
+ args += [container.id]
+ args += command
+
+ sys.exit(call_docker(args))
+
+ create_exec_options = {
+ "privileged": options["--privileged"],
+ "user": options["--user"],
+ "tty": tty,
+ "stdin": tty,
+ }
+
+ exec_id = container.create_exec(command, **create_exec_options)
+
+ if detach:
+ container.start_exec(exec_id, tty=tty, stream=True)
+ return
+
+ signals.set_signal_handler_to_shutdown()
+ try:
+ operation = ExecOperation(
+ self.project.client,
+ exec_id,
+ interactive=tty,
+ )
+ pty = PseudoTerminal(self.project.client, operation)
+ pty.start()
+ except signals.ShutdownException:
+ log.info("received shutdown exception: closing")
+ exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode")
+ sys.exit(exit_code)
+
+ @classmethod
+ def help(cls, options):
+ """
+ Get help on a command.
+
+ Usage: help [COMMAND]
+ """
+ if options['COMMAND']:
+ subject = get_handler(cls, options['COMMAND'])
+ else:
+ subject = cls
+
+ print(getdoc(subject))
+
+ def images(self, options):
+ """
+ List images used by the created containers.
+
+ Usage: images [options] [SERVICE...]
+
+ Options:
+ -q Only display IDs
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
+
+ if options['-q']:
+ for image in set(c.image for c in containers):
+ print(image.split(':')[1])
+ else:
+ headers = [
+ 'Container',
+ 'Repository',
+ 'Tag',
+ 'Image Id',
+ 'Size'
+ ]
+ rows = []
+ for container in containers:
+ image_config = container.image_config
+ repo_tags = image_config['RepoTags'][0].rsplit(':', 1)
+ image_id = image_config['Id'].split(':')[1][:12]
+ size = human_readable_file_size(image_config['Size'])
+ rows.append([
+ container.name,
+ repo_tags[0],
+ repo_tags[1],
+ image_id,
+ size
+ ])
+ print(Formatter().table(headers, rows))
+
+ def kill(self, options):
+ """
+ Force stop service containers.
+
+ Usage: kill [options] [SERVICE...]
+
+ Options:
+ -s SIGNAL SIGNAL to send to the container.
+ Default signal is SIGKILL.
+ """
+ signal = options.get('-s', 'SIGKILL')
+
+ self.project.kill(service_names=options['SERVICE'], signal=signal)
+
+ def logs(self, options):
+ """
+ View output from containers.
+
+ Usage: logs [options] [SERVICE...]
+
+ Options:
+ --no-color Produce monochrome output.
+ -f, --follow Follow log output.
+ -t, --timestamps Show timestamps.
+ --tail="all" Number of lines to show from the end of the logs
+ for each container.
+ """
+ containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
+
+ tail = options['--tail']
+ if tail is not None:
+ if tail.isdigit():
+ tail = int(tail)
+ elif tail != 'all':
+ raise UserError("tail flag must be all or a number")
+ log_args = {
+ 'follow': options['--follow'],
+ 'tail': tail,
+ 'timestamps': options['--timestamps']
+ }
+ print("Attaching to", list_containers(containers))
+ log_printer_from_project(
+ self.project,
+ containers,
+ options['--no-color'],
+ log_args,
+ event_stream=self.project.events(service_names=options['SERVICE'])).run()
+
+ def pause(self, options):
+ """
+ Pause services.
+
+ Usage: pause [SERVICE...]
+ """
+ containers = self.project.pause(service_names=options['SERVICE'])
+ exit_if(not containers, 'No containers to pause', 1)
+
+ def port(self, options):
+ """
+ Print the public port for a port binding.
+
+ Usage: port [options] SERVICE PRIVATE_PORT
+
+ Options:
+ --protocol=proto tcp or udp [default: tcp]
+ --index=index index of the container if there are multiple
+ instances of a service [default: 1]
+ """
+ index = int(options.get('--index'))
+ service = self.project.get_service(options['SERVICE'])
+ try:
+ container = service.get_container(number=index)
+ except ValueError as e:
+ raise UserError(str(e))
+ print(container.get_local_port(
+ options['PRIVATE_PORT'],
+ protocol=options.get('--protocol') or 'tcp') or '')
+
+ def ps(self, options):
+ """
+ List containers.
+
+ Usage: ps [options] [SERVICE...]
+
+ Options:
+ -q Only display IDs
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
+
+ if options['-q']:
+ for container in containers:
+ print(container.id)
+ else:
+ headers = [
+ 'Name',
+ 'Command',
+ 'State',
+ 'Ports',
+ ]
+ rows = []
+ for container in containers:
+ command = container.human_readable_command
+ if len(command) > 30:
+ command = '%s ...' % command[:26]
+ rows.append([
+ container.name,
+ command,
+ container.human_readable_state,
+ container.human_readable_ports,
+ ])
+ print(Formatter().table(headers, rows))
+
+ def pull(self, options):
+ """
+ Pulls images for services defined in a Compose file, but does not start the containers.
+
+ Usage: pull [options] [SERVICE...]
+
+ Options:
+ --ignore-pull-failures Pull what it can and ignore images with pull failures.
+ --parallel Pull multiple images in parallel.
+ --quiet Pull without printing progress information
+ """
+ self.project.pull(
+ service_names=options['SERVICE'],
+ ignore_pull_failures=options.get('--ignore-pull-failures'),
+ parallel_pull=options.get('--parallel'),
+ silent=options.get('--quiet'),
+ )
+
+ def push(self, options):
+ """
+ Pushes images for services.
+
+ Usage: push [options] [SERVICE...]
+
+ Options:
+ --ignore-push-failures Push what it can and ignore images with push failures.
+ """
+ self.project.push(
+ service_names=options['SERVICE'],
+ ignore_push_failures=options.get('--ignore-push-failures')
+ )
+
+ def rm(self, options):
+ """
+ Removes stopped service containers.
+
+ By default, anonymous volumes attached to containers will not be removed. You
+ can override this with `-v`. To list all volumes, use `docker volume ls`.
+
+ Any data which is not in a volume will be lost.
+
+ Usage: rm [options] [SERVICE...]
+
+ Options:
+ -f, --force Don't ask to confirm removal
+ -s, --stop Stop the containers, if required, before removing
+ -v Remove any anonymous volumes attached to containers
+ -a, --all Deprecated - no effect.
+ """
+ if options.get('--all'):
+ log.warn(
+ '--all flag is obsolete. This is now the default behavior '
+ 'of `docker-compose rm`'
+ )
+ one_off = OneOffFilter.include
+
+ if options.get('--stop'):
+ self.project.stop(service_names=options['SERVICE'], one_off=one_off)
+
+ all_containers = self.project.containers(
+ service_names=options['SERVICE'], stopped=True, one_off=one_off
+ )
+ stopped_containers = [c for c in all_containers if not c.is_running]
+
+ if len(stopped_containers) > 0:
+ print("Going to remove", list_containers(stopped_containers))
+ if options.get('--force') \
+ or yesno("Are you sure? [yN] ", default=False):
+ self.project.remove_stopped(
+ service_names=options['SERVICE'],
+ v=options.get('-v', False),
+ one_off=one_off
+ )
+ else:
+ print("No stopped containers")
+
+ def run(self, options):
+ """
+ Run a one-off command on a service.
+
+ For example:
+
+ $ docker-compose run web python manage.py shell
+
+ By default, linked services will be started, unless they are already
+ running. If you do not want to start linked services, use
+ `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
+
+ Usage: run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
+
+ Options:
+ -d Detached mode: Run container in the background, print
+ new container name.
+ --name NAME Assign a name to the container
+ --entrypoint CMD Override the entrypoint of the image.
+ -e KEY=VAL Set an environment variable (can be used multiple times)
+ -u, --user="" Run as specified username or uid
+ --no-deps Don't start linked services.
+ --rm Remove container after run. Ignored in detached mode.
+ -p, --publish=[] Publish a container's port(s) to the host
+ --service-ports Run command with the service's ports enabled and mapped
+ to the host.
+ -v, --volume=[] Bind mount a volume (default [])
+ -T Disable pseudo-tty allocation. By default `docker-compose run`
+ allocates a TTY.
+ -w, --workdir="" Working directory inside the container
+ """
+ service = self.project.get_service(options['SERVICE'])
+ detach = options['-d']
+
+ if options['--publish'] and options['--service-ports']:
+ raise UserError(
+ 'Service port mapping and manual port mapping '
+ 'can not be used together'
+ )
+
+ if options['COMMAND'] is not None:
+ command = [options['COMMAND']] + options['ARGS']
+ elif options['--entrypoint'] is not None:
+ command = []
+ else:
+ command = service.options.get('command')
+
+ container_options = build_container_options(options, detach, command)
+ run_one_off_container(container_options, self.project, service, options)
+
+ def scale(self, options):
+ """
+ Set number of containers to run for a service.
+
+ Numbers are specified in the form `service=num` as arguments.
+ For example:
+
+ $ docker-compose scale web=2 worker=3
+
+ This command is deprecated. Use the up command with the `--scale` flag
+ instead.
+
+ Usage: scale [options] [SERVICE=NUM...]
+
+ Options:
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
+ """
+ timeout = timeout_from_opts(options)
+
+ if self.project.config_version == V2_2:
+ raise UserError(
+ 'The scale command is incompatible with the v2.2 format. '
+ 'Use the up command with the --scale flag instead.'
+ )
+ else:
+ log.warn(
+ 'The scale command is deprecated. '
+ 'Use the up command with the --scale flag instead.'
+ )
+
+ for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
+ self.project.get_service(service_name).scale(num, timeout=timeout)
+
+ def start(self, options):
+ """
+ Start existing containers.
+
+ Usage: start [SERVICE...]
+ """
+ containers = self.project.start(service_names=options['SERVICE'])
+ exit_if(not containers, 'No containers to start', 1)
+
+ def stop(self, options):
+ """
+ Stop running containers without removing them.
+
+ They can be started again with `docker-compose start`.
+
+ Usage: stop [options] [SERVICE...]
+
+ Options:
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
+ """
+ timeout = timeout_from_opts(options)
+ self.project.stop(service_names=options['SERVICE'], timeout=timeout)
+
+ def restart(self, options):
+ """
+ Restart running containers.
+
+ Usage: restart [options] [SERVICE...]
+
+ Options:
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
+ """
+ timeout = timeout_from_opts(options)
+ containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
+ exit_if(not containers, 'No containers to restart', 1)
+
+ def top(self, options):
+ """
+ Display the running processes.
+
+ Usage: top [SERVICE...]
+
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=False) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name')
+ )
+
+ for idx, container in enumerate(containers):
+ if idx > 0:
+ print()
+
+ top_data = self.project.client.top(container.name)
+ headers = top_data.get("Titles")
+ rows = []
+
+ for process in top_data.get("Processes", []):
+ rows.append(process)
+
+ print(container.name)
+ print(Formatter().table(headers, rows))
+
+ def unpause(self, options):
+ """
+ Unpause services.
+
+ Usage: unpause [SERVICE...]
+ """
+ containers = self.project.unpause(service_names=options['SERVICE'])
+ exit_if(not containers, 'No containers to unpause', 1)
+
+ def up(self, options):
+ """
+ Builds, (re)creates, starts, and attaches to containers for a service.
+
+ Unless they are already running, this command also starts any linked services.
+
+ The `docker-compose up` command aggregates the output of each container. When
+ the command exits, all containers are stopped. Running `docker-compose up -d`
+ starts the containers in the background and leaves them running.
+
+ If there are existing containers for a service, and the service's configuration
+ or image was changed after the container's creation, `docker-compose up` picks
+ up the changes by stopping and recreating the containers (preserving mounted
+ volumes). To prevent Compose from picking up changes, use the `--no-recreate`
+ flag.
+
+ If you want to force Compose to stop and recreate all containers, use the
+ `--force-recreate` flag.
+
+ Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]
+
+ Options:
+ -d Detached mode: Run containers in the background,
+ print new container names.
+ Incompatible with --abort-on-container-exit.
+ --no-color Produce monochrome output.
+ --no-deps Don't start linked services.
+ --force-recreate Recreate containers even if their configuration
+ and image haven't changed.
+ Incompatible with --no-recreate.
+ --no-recreate If containers already exist, don't recreate them.
+ Incompatible with --force-recreate.
+ --no-build Don't build an image, even if it's missing.
+ --no-start Don't start the services after creating them.
+ --build Build images before starting containers.
+ --abort-on-container-exit Stops all containers if any container was stopped.
+ Incompatible with -d.
+ -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
+ when attached or when containers are already
+ running. (default: 10)
+ --remove-orphans Remove containers for services not
+ defined in the Compose file
+ --exit-code-from SERVICE Return the exit code of the selected service container.
+ Implies --abort-on-container-exit.
+ --scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale`
+ setting in the Compose file if present.
+ """
+ start_deps = not options['--no-deps']
+ exit_value_from = exitval_from_opts(options, self.project)
+ cascade_stop = options['--abort-on-container-exit']
+ service_names = options['SERVICE']
+ timeout = timeout_from_opts(options)
+ remove_orphans = options['--remove-orphans']
+ detached = options.get('-d')
+ no_start = options.get('--no-start')
+
+ if detached and (cascade_stop or exit_value_from):
+ raise UserError("--abort-on-container-exit and -d cannot be combined.")
+
+ if no_start:
+ for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
+ if options.get(excluded):
+ raise UserError('--no-start and {} cannot be combined.'.format(excluded))
+
+ with up_shutdown_context(self.project, service_names, timeout, detached):
+ to_attach = self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=convergence_strategy_from_opts(options),
+ do_build=build_action_from_opts(options),
+ timeout=timeout,
+ detached=detached,
+ remove_orphans=remove_orphans,
+ scale_override=parse_scale_args(options['--scale']),
+ start=not no_start
+ )
+
+ if detached or no_start:
+ return
+
+ attached_containers = filter_containers_to_service_names(to_attach, service_names)
+
+ log_printer = log_printer_from_project(
+ self.project,
+ attached_containers,
+ options['--no-color'],
+ {'follow': True},
+ cascade_stop,
+ event_stream=self.project.events(service_names=service_names))
+ print("Attaching to", list_containers(log_printer.containers))
+ cascade_starter = log_printer.run()
+
+ if cascade_stop:
+ print("Aborting on container exit...")
+ all_containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
+ exit_code = compute_exit_code(
+ exit_value_from, attached_containers, cascade_starter, all_containers
+ )
+
+ self.project.stop(service_names=service_names, timeout=timeout)
+ sys.exit(exit_code)
+
+ @classmethod
+ def version(cls, options):
+ """
+ Show version information.
+
+ Usage: version [--short]
+
+ Options:
+ --short Shows only Compose's version number.
+ """
+ if options['--short']:
+ print(__version__)
+ else:
+ print(get_version_info('full'))
+
+
+def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
+ exit_code = 0
+ if exit_value_from:
+ candidates = list(filter(
+ lambda c: c.service == exit_value_from,
+ attached_containers))
+ if not candidates:
+ log.error(
+ 'No containers matching the spec "{0}" '
+ 'were run.'.format(exit_value_from)
+ )
+ exit_code = 2
+ elif len(candidates) > 1:
+ # filter() returns an iterator on Python 3, so collect the
+ # non-zero exit codes in a list before indexing; if every
+ # candidate exited 0, fall back to 0.
+ exit_values = [
+ e for e in (c.inspect()['State']['ExitCode'] for c in candidates)
+ if e != 0
+ ]
+ exit_code = exit_values[0] if exit_values else 0
+ else:
+ exit_code = candidates[0].inspect()['State']['ExitCode']
+ else:
+ for e in all_containers:
+ if (not e.is_running and cascade_starter == e.name):
+ if not e.exit_code == 0:
+ exit_code = e.exit_code
+ break
+
+ return exit_code
+
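+# For example, with `--exit-code-from web`, compute_exit_code() returns 2
+# when no "web" container was run, the single container's exit code when
+# exactly one was, and the first non-zero exit code (or 0) when several
+# were.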
+
+def convergence_strategy_from_opts(options):
+ no_recreate = options['--no-recreate']
+ force_recreate = options['--force-recreate']
+ if force_recreate and no_recreate:
+ raise UserError("--force-recreate and --no-recreate cannot be combined.")
+
+ if force_recreate:
+ return ConvergenceStrategy.always
+
+ if no_recreate:
+ return ConvergenceStrategy.never
+
+ return ConvergenceStrategy.changed
+
+
+def timeout_from_opts(options):
+ timeout = options.get('--timeout')
+ return None if timeout is None else int(timeout)
+
+
+def image_digests_for_project(project, allow_push=False):
+ with errors.handle_connection_errors(project.client):
+ try:
+ return get_image_digests(
+ project,
+ allow_push=allow_push
+ )
+ except MissingDigests as e:
+ def list_images(images):
+ return "\n".join(" {}".format(name) for name in sorted(images))
+
+ paras = ["Some images are missing digests."]
+
+ if e.needs_push:
+ command_hint = (
+ "Use `docker-compose push {}` to push them. "
+ .format(" ".join(sorted(e.needs_push)))
+ )
+ paras += [
+ "The following images can be pushed:",
+ list_images(e.needs_push),
+ command_hint,
+ ]
+
+ if e.needs_pull:
+ command_hint = (
+ "Use `docker-compose pull {}` to pull them. "
+ .format(" ".join(sorted(e.needs_pull)))
+ )
+
+ paras += [
+ "The following images need to be pulled:",
+ list_images(e.needs_pull),
+ command_hint,
+ ]
+
+ raise UserError("\n\n".join(paras))
+
+
+def exitval_from_opts(options, project):
+ exit_value_from = options.get('--exit-code-from')
+ if exit_value_from:
+ if not options.get('--abort-on-container-exit'):
+ log.warn('using --exit-code-from implies --abort-on-container-exit')
+ options['--abort-on-container-exit'] = True
+ if exit_value_from not in [s.name for s in project.get_services()]:
+ log.error('No service named "%s" was found in your compose file.',
+ exit_value_from)
+ sys.exit(2)
+ return exit_value_from
+
+
+def image_type_from_opt(flag, value):
+ if not value:
+ return ImageType.none
+ try:
+ return ImageType[value]
+ except KeyError:
+ raise UserError("%s flag must be one of: all, local" % flag)
+
+
+def build_action_from_opts(options):
+ if options['--build'] and options['--no-build']:
+ raise UserError("--build and --no-build can not be combined.")
+
+ if options['--build']:
+ return BuildAction.force
+
+ if options['--no-build']:
+ return BuildAction.skip
+
+ return BuildAction.none
+
+
+def build_container_options(options, detach, command):
+ container_options = {
+ 'command': command,
+ 'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
+ 'stdin_open': not detach,
+ 'detach': detach,
+ }
+
+ if options['-e']:
+ container_options['environment'] = Environment.from_command_line(
+ parse_environment(options['-e'])
+ )
+
+ if options['--entrypoint']:
+ container_options['entrypoint'] = options.get('--entrypoint')
+
+ if options['--rm']:
+ container_options['restart'] = None
+
+ if options['--user']:
+ container_options['user'] = options.get('--user')
+
+ if not options['--service-ports']:
+ container_options['ports'] = []
+
+ if options['--publish']:
+ container_options['ports'] = options.get('--publish')
+
+ if options['--name']:
+ container_options['name'] = options['--name']
+
+ if options['--workdir']:
+ container_options['working_dir'] = options['--workdir']
+
+ if options['--volume']:
+ volumes = [VolumeSpec.parse(i) for i in options['--volume']]
+ container_options['volumes'] = volumes
+
+ return container_options
+
+
+def run_one_off_container(container_options, project, service, options):
+ if not options['--no-deps']:
+ deps = service.get_dependency_names()
+ if deps:
+ project.up(
+ service_names=deps,
+ start_deps=True,
+ strategy=ConvergenceStrategy.never,
+ rescale=False
+ )
+
+ project.initialize()
+
+ container = service.create_container(
+ quiet=True,
+ one_off=True,
+ **container_options)
+
+ if options['-d']:
+ service.start_container(container)
+ print(container.name)
+ return
+
+ def remove_container(force=False):
+ if options['--rm']:
+ project.client.remove_container(container.id, force=True, v=True)
+
+ signals.set_signal_handler_to_shutdown()
+ try:
+ try:
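+            # There is no PTY support on Windows, so interactive one-off
+            # containers are delegated to the docker CLI binary instead.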
+ if IS_WINDOWS_PLATFORM:
+ service.connect_container_to_networks(container)
+ exit_code = call_docker(["start", "--attach", "--interactive", container.id])
+ else:
+ operation = RunOperation(
+ project.client,
+ container.id,
+ interactive=not options['-T'],
+ logs=False,
+ )
+ pty = PseudoTerminal(project.client, operation)
+ sockets = pty.sockets()
+ service.start_container(container)
+ pty.start(sockets)
+ exit_code = container.wait()
+ except signals.ShutdownException:
+ project.client.stop(container.id)
+ exit_code = 1
+ except signals.ShutdownException:
+ project.client.kill(container.id)
+ remove_container(force=True)
+ sys.exit(2)
+
+ remove_container()
+ sys.exit(exit_code)
+
+
+def log_printer_from_project(
+ project,
+ containers,
+ monochrome,
+ log_args,
+ cascade_stop=False,
+ event_stream=None,
+):
+ return LogPrinter(
+ containers,
+ build_log_presenters(project.service_names, monochrome),
+ event_stream or project.events(),
+ cascade_stop=cascade_stop,
+ log_args=log_args)
+
+
+def filter_containers_to_service_names(containers, service_names):
+ if not service_names:
+ return containers
+
+ return [
+ container
+ for container in containers if container.service in service_names
+ ]
+
+
+@contextlib.contextmanager
+def up_shutdown_context(project, service_names, timeout, detached):
+ if detached:
+ yield
+ return
+
+ signals.set_signal_handler_to_shutdown()
+ try:
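+        # The first ShutdownException (Ctrl+C) triggers a graceful stop;
+        # a second one while stopping falls through to the outer handler,
+        # which kills the containers and exits with status 2.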
+ try:
+ yield
+ except signals.ShutdownException:
+ print("Gracefully stopping... (press Ctrl+C again to force)")
+ project.stop(service_names=service_names, timeout=timeout)
+ except signals.ShutdownException:
+ project.kill(service_names=service_names)
+ sys.exit(2)
+
+
+def list_containers(containers):
+ return ", ".join(c.name for c in containers)
+
+
+def exit_if(condition, message, exit_code):
+ if condition:
+ log.error(message)
+ raise SystemExit(exit_code)
+
+
+def call_docker(args):
+ executable_path = find_executable('docker')
+ if not executable_path:
+ raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
+
+ args = [executable_path] + args
+ log.debug(" ".join(map(pipes.quote, args)))
+
+ return subprocess.call(args)
+
+
+def parse_scale_args(options):
+ res = {}
+ for s in options:
+ if '=' not in s:
+ raise UserError('Arguments to scale should be in the form service=num')
+ service_name, num = s.split('=', 1)
+ try:
+ num = int(num)
+ except ValueError:
+ raise UserError(
+ 'Number of containers for service "%s" is not a number' % service_name
+ )
+ res[service_name] = num
+ return res
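+# For example, parse_scale_args(['web=2', 'db=1']) returns
+# {'web': 2, 'db': 1}; a malformed argument such as 'web' or 'web=two'
+# raises a UserError instead.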
diff --git a/compose/cli/signals.py b/compose/cli/signals.py
new file mode 100644
index 00000000..9b360c44
--- /dev/null
+++ b/compose/cli/signals.py
@@ -0,0 +1,30 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import signal
+
+from ..const import IS_WINDOWS_PLATFORM
+
+
+class ShutdownException(Exception):
+ pass
+
+
+def shutdown(signal, frame):
+ raise ShutdownException()
+
+
+def set_signal_handler(handler):
+ signal.signal(signal.SIGINT, handler)
+ signal.signal(signal.SIGTERM, handler)
+
+
+def set_signal_handler_to_shutdown():
+ set_signal_handler(shutdown)
+
+
+def ignore_sigpipe():
+ # Restore default behavior for SIGPIPE instead of raising
+ # an exception when encountered.
+ if not IS_WINDOWS_PLATFORM:
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
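+
+
+# Typical usage (sketch; see compose.cli.main): install the handler before
+# a long-running operation, then catch ShutdownException to clean up:
+#
+#     set_signal_handler_to_shutdown()
+#     try:
+#         do_work()                 # hypothetical long-running call
+#     except ShutdownException:
+#         stop_containers()         # hypothetical cleanup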
diff --git a/compose/cli/utils.py b/compose/cli/utils.py
new file mode 100644
index 00000000..4d4fc4c1
--- /dev/null
+++ b/compose/cli/utils.py
@@ -0,0 +1,150 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import unicode_literals
+
+import math
+import os
+import platform
+import ssl
+import subprocess
+import sys
+
+import docker
+
+import compose
+from ..const import IS_WINDOWS_PLATFORM
+
+# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
+# defining it as OSError (its parent class) if missing.
+try:
+ WindowsError
+except NameError:
+ WindowsError = OSError
+
+
+def yesno(prompt, default=None):
+ """
+ Prompt the user for a yes or no.
+
+ Can optionally specify a default value, which will only be
+ used if they enter a blank line.
+
+ Unrecognised input (anything other than "y", "n", "yes",
+ "no" or "") will return None.
+ """
+ answer = input(prompt).strip().lower()
+
+ if answer == "y" or answer == "yes":
+ return True
+ elif answer == "n" or answer == "no":
+ return False
+ elif answer == "":
+ return default
+ else:
+ return None
+
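+# e.g. yesno("Remove volumes? [yN] ", default=False) returns False on a
+# blank line, True for "y"/"yes", False for "n"/"no", and None otherwise.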
+
+def input(prompt):
+ """
+    Version of input (raw_input in Python 2) which forces a flush of
+    sys.stdout to avoid problems where the prompt fails to appear due to
+    line buffering.
+ """
+ sys.stdout.write(prompt)
+ sys.stdout.flush()
+ return sys.stdin.readline().rstrip('\n')
+
+
+def call_silently(*args, **kwargs):
+ """
+ Like subprocess.call(), but redirects stdout and stderr to /dev/null.
+ """
+ with open(os.devnull, 'w') as shutup:
+ try:
+ return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs)
+ except WindowsError:
+ # On Windows, subprocess.call() can still raise exceptions. Normalize
+ # to POSIXy behaviour by returning a nonzero exit code.
+ return 1
+
+
+def is_mac():
+ return platform.system() == 'Darwin'
+
+
+def is_ubuntu():
+ return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'
+
+
+def is_windows():
+ return IS_WINDOWS_PLATFORM
+
+
+def get_version_info(scope):
+ versioninfo = 'docker-compose version {}, build {}'.format(
+ compose.__version__,
+ get_build_version())
+
+ if scope == 'compose':
+ return versioninfo
+ if scope == 'full':
+ return (
+ "{}\n"
+ "docker-py version: {}\n"
+ "{} version: {}\n"
+ "OpenSSL version: {}"
+ ).format(
+ versioninfo,
+ docker.version,
+ platform.python_implementation(),
+ platform.python_version(),
+ ssl.OPENSSL_VERSION)
+
+ raise ValueError("{} is not a valid version scope".format(scope))
+
+
+def get_build_version():
+ filename = os.path.join(os.path.dirname(compose.__file__), 'GITSHA')
+ if not os.path.exists(filename):
+ return 'unknown'
+
+ with open(filename) as fh:
+ return fh.read().strip()
+
+
+def is_docker_for_mac_installed():
+ return is_mac() and os.path.isdir('/Applications/Docker.app')
+
+
+def generate_user_agent():
+ parts = [
+ "docker-compose/{}".format(compose.__version__),
+ "docker-py/{}".format(docker.__version__),
+ ]
+ try:
+ p_system = platform.system()
+ p_release = platform.release()
+ except IOError:
+ pass
+ else:
+ parts.append("{}/{}".format(p_system, p_release))
+ return " ".join(parts)
+
+
+def unquote_path(s):
+ if not s:
+ return s
+ if s[0] == '"' and s[-1] == '"':
+ return s[1:-1]
+ return s
+
+
+def human_readable_file_size(size):
+ suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
+ order = int(math.log(size, 2) / 10) if size else 0
+ if order >= len(suffixes):
+ order = len(suffixes) - 1
+
+ return '{0:.3g} {1}'.format(
+ size / float(1 << (order * 10)),
+ suffixes[order]
+ )
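+
+
+# For example: human_readable_file_size(0) == '0 B' and
+# human_readable_file_size(1536) == '1.5 kB'; sizes beyond the largest
+# suffix are clamped to 'EB'.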
diff --git a/compose/cli/verbose_proxy.py b/compose/cli/verbose_proxy.py
new file mode 100644
index 00000000..b1592eab
--- /dev/null
+++ b/compose/cli/verbose_proxy.py
@@ -0,0 +1,60 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import functools
+import logging
+import pprint
+from itertools import chain
+
+import six
+
+
+def format_call(args, kwargs):
+ args = (repr(a) for a in args)
+ kwargs = ("{0!s}={1!r}".format(*item) for item in six.iteritems(kwargs))
+ return "({0})".format(", ".join(chain(args, kwargs)))
+
+
+def format_return(result, max_lines):
+ if isinstance(result, (list, tuple, set)):
+ return "({0} with {1} items)".format(type(result).__name__, len(result))
+
+ if result:
+ lines = pprint.pformat(result).split('\n')
+ extra = '\n...' if len(lines) > max_lines else ''
+ return '\n'.join(lines[:max_lines]) + extra
+
+ return result
+
+
+class VerboseProxy(object):
+ """Proxy all function calls to another class and log method name, arguments
+ and return values for each call.
+ """
+
+ def __init__(self, obj_name, obj, log_name=None, max_lines=10):
+ self.obj_name = obj_name
+ self.obj = obj
+ self.max_lines = max_lines
+ self.log = logging.getLogger(log_name or __name__)
+
+ def __getattr__(self, name):
+ attr = getattr(self.obj, name)
+
+ if not six.callable(attr):
+ return attr
+
+ return functools.partial(self.proxy_callable, name)
+
+ def proxy_callable(self, call_name, *args, **kwargs):
+ self.log.info("%s %s <- %s",
+ self.obj_name,
+ call_name,
+ format_call(args, kwargs))
+
+ result = getattr(self.obj, call_name)(*args, **kwargs)
+ self.log.info("%s %s -> %s",
+ self.obj_name,
+ call_name,
+ format_return(result, self.max_lines))
+ return result
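+
+
+# Rough usage sketch (assumed caller): wrap a docker client so every API
+# call is logged with its arguments and a truncated return value:
+#
+#     client = VerboseProxy('docker', docker_client)
+#     client.containers(all=True)   # logs the call and its result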
diff --git a/compose/config/__init__.py b/compose/config/__init__.py
new file mode 100644
index 00000000..b629edf6
--- /dev/null
+++ b/compose/config/__init__.py
@@ -0,0 +1,12 @@
+# flake8: noqa
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from . import environment
+from .config import ConfigurationError
+from .config import DOCKER_CONFIG_KEYS
+from .config import find
+from .config import load
+from .config import merge_environment
+from .config import parse_environment
+from .config import resolve_build_args
diff --git a/compose/config/config.py b/compose/config/config.py
new file mode 100644
index 00000000..d5aaf953
--- /dev/null
+++ b/compose/config/config.py
@@ -0,0 +1,1306 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import functools
+import logging
+import os
+import string
+import sys
+from collections import namedtuple
+
+import six
+import yaml
+from cached_property import cached_property
+
+from . import types
+from .. import const
+from ..const import COMPOSEFILE_V1 as V1
+from ..const import COMPOSEFILE_V2_1 as V2_1
+from ..const import COMPOSEFILE_V3_0 as V3_0
+from ..const import COMPOSEFILE_V3_4 as V3_4
+from ..utils import build_string_dict
+from ..utils import parse_bytes
+from ..utils import parse_nanoseconds_int
+from ..utils import splitdrive
+from ..version import ComposeVersion
+from .environment import env_vars_from_file
+from .environment import Environment
+from .environment import split_env
+from .errors import CircularReference
+from .errors import ComposeFileNotFound
+from .errors import ConfigurationError
+from .errors import DuplicateOverrideFileFound
+from .errors import VERSION_EXPLANATION
+from .interpolation import interpolate_environment_variables
+from .sort_services import get_container_name_from_network_mode
+from .sort_services import get_service_name_from_network_mode
+from .sort_services import sort_service_dicts
+from .types import parse_extra_hosts
+from .types import parse_restart_spec
+from .types import ServiceLink
+from .types import ServicePort
+from .types import VolumeFromSpec
+from .types import VolumeSpec
+from .validation import match_named_volumes
+from .validation import validate_against_config_schema
+from .validation import validate_config_section
+from .validation import validate_cpu
+from .validation import validate_depends_on
+from .validation import validate_extends_file_path
+from .validation import validate_links
+from .validation import validate_network_mode
+from .validation import validate_pid_mode
+from .validation import validate_service_constraints
+from .validation import validate_top_level_object
+from .validation import validate_ulimits
+
+
+DOCKER_CONFIG_KEYS = [
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'command',
+ 'cpu_count',
+ 'cpu_percent',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpus',
+ 'cpuset',
+ 'detach',
+ 'devices',
+ 'dns',
+ 'dns_search',
+ 'dns_opt',
+ 'domainname',
+ 'entrypoint',
+ 'env_file',
+ 'environment',
+ 'extra_hosts',
+ 'group_add',
+ 'hostname',
+ 'healthcheck',
+ 'image',
+ 'ipc',
+ 'labels',
+ 'links',
+ 'mac_address',
+ 'mem_limit',
+ 'mem_reservation',
+ 'memswap_limit',
+ 'mem_swappiness',
+ 'net',
+ 'oom_score_adj',
+ 'pid',
+ 'ports',
+ 'privileged',
+ 'read_only',
+ 'restart',
+ 'secrets',
+ 'security_opt',
+ 'shm_size',
+ 'pids_limit',
+ 'stdin_open',
+ 'stop_signal',
+ 'sysctls',
+ 'tty',
+ 'user',
+ 'userns_mode',
+ 'volume_driver',
+ 'volumes',
+ 'volumes_from',
+ 'working_dir',
+]
+
+ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
+ 'blkio_config',
+ 'build',
+ 'container_name',
+ 'credential_spec',
+ 'dockerfile',
+ 'log_driver',
+ 'log_opt',
+ 'logging',
+ 'network_mode',
+ 'init',
+ 'scale',
+]
+
+DOCKER_VALID_URL_PREFIXES = (
+ 'http://',
+ 'https://',
+ 'git://',
+ 'github.com/',
+ 'git@',
+)
+
+SUPPORTED_FILENAMES = [
+ 'docker-compose.yml',
+ 'docker-compose.yaml',
+]
+
+DEFAULT_OVERRIDE_FILENAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml')
+
+
+log = logging.getLogger(__name__)
+
+
+class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')):
+ """
+ :param working_dir: the directory to use for relative paths in the config
+ :type working_dir: string
+ :param config_files: list of configuration files to load
+ :type config_files: list of :class:`ConfigFile`
+ :param environment: computed environment values for this project
+ :type environment: :class:`environment.Environment`
+ """
+ def __new__(cls, working_dir, config_files, environment=None):
+ if environment is None:
+ environment = Environment.from_env_file(working_dir)
+ return super(ConfigDetails, cls).__new__(
+ cls, working_dir, config_files, environment
+ )
+
+
+class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
+ """
+ :param filename: filename of the config file
+ :type filename: string
+ :param config: contents of the config file
+ :type config: :class:`dict`
+ """
+
+ @classmethod
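+# For example, with working_dir '/proj',
+# resolve_volume_path('/proj', './data:/var/lib/data:ro') returns
+# '/proj/data:/var/lib/data:ro'.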
+ def from_filename(cls, filename):
+ return cls(filename, load_yaml(filename))
+
+ @cached_property
+ def version(self):
+ if 'version' not in self.config:
+ return V1
+
+ version = self.config['version']
+
+ if isinstance(version, dict):
+ log.warn('Unexpected type for "version" key in "{}". Assuming '
+ '"version" is the name of a service, and defaulting to '
+ 'Compose file version 1.'.format(self.filename))
+ return V1
+
+ if not isinstance(version, six.string_types):
+ raise ConfigurationError(
+ 'Version in "{}" is invalid - it should be a string.'
+ .format(self.filename))
+
+ if version == '1':
+ raise ConfigurationError(
+ 'Version in "{}" is invalid. {}'
+ .format(self.filename, VERSION_EXPLANATION)
+ )
+
+ if version == '2':
+ return const.COMPOSEFILE_V2_0
+
+ if version == '3':
+ return const.COMPOSEFILE_V3_0
+
+ return ComposeVersion(version)
+
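+    # e.g. a file with no 'version' key is treated as V1, 'version: "2"'
+    # maps to COMPOSEFILE_V2_0, and 'version: "2.1"' becomes
+    # ComposeVersion('2.1').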
+ def get_service(self, name):
+ return self.get_service_dicts()[name]
+
+ def get_service_dicts(self):
+ return self.config if self.version == V1 else self.config.get('services', {})
+
+ def get_volumes(self):
+ return {} if self.version == V1 else self.config.get('volumes', {})
+
+ def get_networks(self):
+ return {} if self.version == V1 else self.config.get('networks', {})
+
+ def get_secrets(self):
+ return {} if self.version < const.COMPOSEFILE_V3_1 else self.config.get('secrets', {})
+
+ def get_configs(self):
+ return {} if self.version < const.COMPOSEFILE_V3_3 else self.config.get('configs', {})
+
+
+class Config(namedtuple('_Config', 'version services volumes networks secrets configs')):
+ """
+ :param version: configuration version
+ :type version: int
+ :param services: List of service description dictionaries
+ :type services: :class:`list`
+ :param volumes: Dictionary mapping volume names to description dictionaries
+ :type volumes: :class:`dict`
+ :param networks: Dictionary mapping network names to description dictionaries
+ :type networks: :class:`dict`
+ :param secrets: Dictionary mapping secret names to description dictionaries
+ :type secrets: :class:`dict`
+ :param configs: Dictionary mapping config names to description dictionaries
+ :type configs: :class:`dict`
+ """
+
+
+class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
+
+ @classmethod
+ def with_abs_paths(cls, working_dir, filename, name, config):
+ if not working_dir:
+ raise ValueError("No working_dir for ServiceConfig.")
+
+ return cls(
+ os.path.abspath(working_dir),
+ os.path.abspath(filename) if filename else filename,
+ name,
+ config)
+
+
+def find(base_dir, filenames, environment, override_dir=None):
+ if filenames == ['-']:
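+# For example, split_path_mapping('/host:/app:ro') returns
+# ('/app', ('/host', 'ro')), while split_path_mapping('/app') returns
+# ('/app', None).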
+ return ConfigDetails(
+ os.path.abspath(override_dir) if override_dir else os.getcwd(),
+ [ConfigFile(None, yaml.safe_load(sys.stdin))],
+ environment
+ )
+
+ if filenames:
+ filenames = [os.path.join(base_dir, f) for f in filenames]
+ else:
+ filenames = get_default_config_files(base_dir)
+
+ log.debug("Using configuration files: {}".format(",".join(filenames)))
+ return ConfigDetails(
+ override_dir if override_dir else os.path.dirname(filenames[0]),
+ [ConfigFile.from_filename(f) for f in filenames],
+ environment
+ )
+
+
+def validate_config_version(config_files):
+ main_file = config_files[0]
+ validate_top_level_object(main_file)
+ for next_file in config_files[1:]:
+ validate_top_level_object(next_file)
+
+ if main_file.version != next_file.version:
+ raise ConfigurationError(
+ "Version mismatch: file {0} specifies version {1} but "
+ "extension file {2} uses version {3}".format(
+ main_file.filename,
+ main_file.version,
+ next_file.filename,
+ next_file.version))
+
+
+def get_default_config_files(base_dir):
+ (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
+
+ if not candidates:
+ raise ComposeFileNotFound(SUPPORTED_FILENAMES)
+
+ winner = candidates[0]
+
+ if len(candidates) > 1:
+ log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
+ log.warn("Using %s\n", winner)
+
+ return [os.path.join(path, winner)] + get_default_override_file(path)
+
+
+def get_default_override_file(path):
+ override_files_in_path = [os.path.join(path, override_filename) for override_filename
+ in DEFAULT_OVERRIDE_FILENAMES
+ if os.path.exists(os.path.join(path, override_filename))]
+ if len(override_files_in_path) > 1:
+ raise DuplicateOverrideFileFound(override_files_in_path)
+ return override_files_in_path
+
+
+def find_candidates_in_parent_dirs(filenames, path):
+ """
+ Given a directory path to start, looks for filenames in the
+ directory, and then each parent directory successively,
+ until found.
+
+ Returns tuple (candidates, path).
+ """
+ candidates = [filename for filename in filenames
+ if os.path.exists(os.path.join(path, filename))]
+
+ if not candidates:
+ parent_dir = os.path.join(path, '..')
+ if os.path.abspath(parent_dir) != os.path.abspath(path):
+ return find_candidates_in_parent_dirs(filenames, parent_dir)
+
+ return (candidates, path)
+
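+# For example, starting from '/proj/sub', the search tries '/proj/sub',
+# then '/proj', then '/', and returns an empty candidate list if no
+# supported filename exists anywhere along the way.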
+
+def check_swarm_only_config(service_dicts):
+ warning_template = (
+ "Some services ({services}) use the '{key}' key, which will be ignored. "
+ "Compose does not support '{key}' configuration - use "
+ "`docker stack deploy` to deploy to a swarm."
+ )
+
+ def check_swarm_only_key(service_dicts, key):
+ services = [s for s in service_dicts if s.get(key)]
+ if services:
+ log.warn(
+ warning_template.format(
+ services=", ".join(sorted(s['name'] for s in services)),
+ key=key
+ )
+ )
+
+ check_swarm_only_key(service_dicts, 'deploy')
+ check_swarm_only_key(service_dicts, 'credential_spec')
+ check_swarm_only_key(service_dicts, 'configs')
+
+
+def load(config_details):
+ """Load the configuration from a working directory and a list of
+ configuration files. Files are loaded in order, and merged on top
+ of each other to create the final configuration.
+
+ Return a fully interpolated, extended and validated configuration.
+ """
+ validate_config_version(config_details.config_files)
+
+ processed_files = [
+ process_config_file(config_file, config_details.environment)
+ for config_file in config_details.config_files
+ ]
+ config_details = config_details._replace(config_files=processed_files)
+
+ main_file = config_details.config_files[0]
+ volumes = load_mapping(
+ config_details.config_files, 'get_volumes', 'Volume'
+ )
+ networks = load_mapping(
+ config_details.config_files, 'get_networks', 'Network'
+ )
+ secrets = load_mapping(
+ config_details.config_files, 'get_secrets', 'Secret', config_details.working_dir
+ )
+ configs = load_mapping(
+ config_details.config_files, 'get_configs', 'Config', config_details.working_dir
+ )
+ service_dicts = load_services(config_details, main_file)
+
+ if main_file.version != V1:
+ for service_dict in service_dicts:
+ match_named_volumes(service_dict, volumes)
+
+ check_swarm_only_config(service_dicts)
+
+ return Config(main_file.version, service_dicts, volumes, networks, secrets, configs)
+
+
+def load_mapping(config_files, get_func, entity_type, working_dir=None):
+ mapping = {}
+
+ for config_file in config_files:
+ for name, config in getattr(config_file, get_func)().items():
+ mapping[name] = config or {}
+ if not config:
+ continue
+
+ external = config.get('external')
+ if external:
+ name_field = 'name' if entity_type == 'Volume' else 'external_name'
+ validate_external(entity_type, name, config, config_file.version)
+ if isinstance(external, dict):
+ config[name_field] = external.get('name')
+ elif not config.get('name'):
+ config[name_field] = name
+
+ if 'driver_opts' in config:
+ config['driver_opts'] = build_string_dict(
+ config['driver_opts']
+ )
+
+ if 'labels' in config:
+ config['labels'] = parse_labels(config['labels'])
+
+ if 'file' in config:
+ config['file'] = expand_path(working_dir, config['file'])
+
+ return mapping
+
+
+def validate_external(entity_type, name, config, version):
+ if (version < V2_1 or (version >= V3_0 and version < V3_4)) and len(config.keys()) > 1:
+ raise ConfigurationError(
+ "{} {} declared as external but specifies additional attributes "
+ "({}).".format(
+ entity_type, name, ', '.join(k for k in config if k != 'external')))
+
+
+def load_services(config_details, config_file):
+ def build_service(service_name, service_dict, service_names):
+ service_config = ServiceConfig.with_abs_paths(
+ config_details.working_dir,
+ config_file.filename,
+ service_name,
+ service_dict)
+ resolver = ServiceExtendsResolver(
+ service_config, config_file, environment=config_details.environment
+ )
+ service_dict = process_service(resolver.run())
+
+ service_config = service_config._replace(config=service_dict)
+ validate_service(service_config, service_names, config_file)
+ service_dict = finalize_service(
+ service_config,
+ service_names,
+ config_file.version,
+ config_details.environment)
+ return service_dict
+
+ def build_services(service_config):
+ service_names = service_config.keys()
+ return sort_service_dicts([
+ build_service(name, service_dict, service_names)
+ for name, service_dict in service_config.items()
+ ])
+
+ def merge_services(base, override):
+ all_service_names = set(base) | set(override)
+ return {
+ name: merge_service_dicts_from_files(
+ base.get(name, {}),
+ override.get(name, {}),
+ config_file.version)
+ for name in all_service_names
+ }
+
+ service_configs = [
+ file.get_service_dicts() for file in config_details.config_files
+ ]
+
+ service_config = service_configs[0]
+ for next_config in service_configs[1:]:
+ service_config = merge_services(service_config, next_config)
+
+ return build_services(service_config)
+
+
+def interpolate_config_section(config_file, config, section, environment):
+ validate_config_section(config_file.filename, config, section)
+ return interpolate_environment_variables(
+ config_file.version,
+ config,
+ section,
+ environment
+ )
+
+
+def process_config_file(config_file, environment, service_name=None):
+ services = interpolate_config_section(
+ config_file,
+ config_file.get_service_dicts(),
+ 'service',
+ environment)
+
+ if config_file.version > V1:
+ processed_config = dict(config_file.config)
+ processed_config['services'] = services
+ processed_config['volumes'] = interpolate_config_section(
+ config_file,
+ config_file.get_volumes(),
+ 'volume',
+ environment)
+ processed_config['networks'] = interpolate_config_section(
+ config_file,
+ config_file.get_networks(),
+ 'network',
+ environment)
+ if config_file.version >= const.COMPOSEFILE_V3_1:
+ processed_config['secrets'] = interpolate_config_section(
+ config_file,
+ config_file.get_secrets(),
+ 'secrets',
+ environment)
+ if config_file.version >= const.COMPOSEFILE_V3_3:
+ processed_config['configs'] = interpolate_config_section(
+ config_file,
+ config_file.get_configs(),
+ 'configs',
+ environment
+ )
+ else:
+ processed_config = services
+
+ config_file = config_file._replace(config=processed_config)
+ validate_against_config_schema(config_file)
+
+ if service_name and service_name not in services:
+ raise ConfigurationError(
+ "Cannot extend service '{}' in {}: Service not found".format(
+ service_name, config_file.filename))
+
+ return config_file
+
+
+class ServiceExtendsResolver(object):
+ def __init__(self, service_config, config_file, environment, already_seen=None):
+ self.service_config = service_config
+ self.working_dir = service_config.working_dir
+ self.already_seen = already_seen or []
+ self.config_file = config_file
+ self.environment = environment
+
+ @property
+ def signature(self):
+ return self.service_config.filename, self.service_config.name
+
+ def detect_cycle(self):
+ if self.signature in self.already_seen:
+ raise CircularReference(self.already_seen + [self.signature])
+
+ def run(self):
+ self.detect_cycle()
+
+ if 'extends' in self.service_config.config:
+ service_dict = self.resolve_extends(*self.validate_and_construct_extends())
+ return self.service_config._replace(config=service_dict)
+
+ return self.service_config
+
+ def validate_and_construct_extends(self):
+ extends = self.service_config.config['extends']
+ if not isinstance(extends, dict):
+ extends = {'service': extends}
+
+ config_path = self.get_extended_config_path(extends)
+ service_name = extends['service']
+
+ if config_path == self.config_file.filename:
+ try:
+ service_config = self.config_file.get_service(service_name)
+ except KeyError:
+ raise ConfigurationError(
+ "Cannot extend service '{}' in {}: Service not found".format(
+ service_name, config_path)
+ )
+ else:
+ extends_file = ConfigFile.from_filename(config_path)
+ validate_config_version([self.config_file, extends_file])
+ extended_file = process_config_file(
+ extends_file, self.environment, service_name=service_name
+ )
+ service_config = extended_file.get_service(service_name)
+
+ return config_path, service_config, service_name
+
+ def resolve_extends(self, extended_config_path, service_dict, service_name):
+ resolver = ServiceExtendsResolver(
+ ServiceConfig.with_abs_paths(
+ os.path.dirname(extended_config_path),
+ extended_config_path,
+ service_name,
+ service_dict),
+ self.config_file,
+ already_seen=self.already_seen + [self.signature],
+ environment=self.environment
+ )
+
+ service_config = resolver.run()
+ other_service_dict = process_service(service_config)
+ validate_extended_service_dict(
+ other_service_dict,
+ extended_config_path,
+ service_name)
+
+ return merge_service_dicts(
+ other_service_dict,
+ self.service_config.config,
+ self.config_file.version)
+
+ def get_extended_config_path(self, extends_options):
+ """Service we are extending either has a value for 'file' set, which we
+ need to obtain a full path too or we are extending from a service
+ defined in our own file.
+ """
+ filename = self.service_config.filename
+ validate_extends_file_path(
+ self.service_config.name,
+ extends_options,
+ filename)
+ if 'file' in extends_options:
+ return expand_path(self.working_dir, extends_options['file'])
+ return filename
+
+
+def resolve_environment(service_dict, environment=None):
+ """Unpack any environment variables from an env_file, if set.
+ Interpolate environment values if set.
+ """
+ env = {}
+ for env_file in service_dict.get('env_file', []):
+ env.update(env_vars_from_file(env_file))
+
+ env.update(parse_environment(service_dict.get('environment')))
+ return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
+
+
+def resolve_build_args(buildargs, environment):
+ args = parse_build_arguments(buildargs)
+ return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args))
+
+
+def validate_extended_service_dict(service_dict, filename, service):
+ error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
+
+ if 'links' in service_dict:
+ raise ConfigurationError(
+ "%s services with 'links' cannot be extended" % error_prefix)
+
+ if 'volumes_from' in service_dict:
+ raise ConfigurationError(
+ "%s services with 'volumes_from' cannot be extended" % error_prefix)
+
+ if 'net' in service_dict:
+ if get_container_name_from_network_mode(service_dict['net']):
+ raise ConfigurationError(
+ "%s services with 'net: container' cannot be extended" % error_prefix)
+
+ if 'network_mode' in service_dict:
+ if get_service_name_from_network_mode(service_dict['network_mode']):
+ raise ConfigurationError(
+ "%s services with 'network_mode: service' cannot be extended" % error_prefix)
+
+ if 'depends_on' in service_dict:
+ raise ConfigurationError(
+ "%s services with 'depends_on' cannot be extended" % error_prefix)
+
+
+def validate_service(service_config, service_names, config_file):
+ service_dict, service_name = service_config.config, service_config.name
+ validate_service_constraints(service_dict, service_name, config_file)
+ validate_paths(service_dict)
+
+ validate_cpu(service_config)
+ validate_ulimits(service_config)
+ validate_network_mode(service_config, service_names)
+ validate_pid_mode(service_config, service_names)
+ validate_depends_on(service_config, service_names)
+ validate_links(service_config, service_names)
+
+ if not service_dict.get('image') and has_uppercase(service_name):
+ raise ConfigurationError(
+ "Service '{name}' contains uppercase characters which are not valid "
+ "as part of an image name. Either use a lowercase service name or "
+ "use the `image` field to set a custom name for the service image."
+ .format(name=service_name))
+
+
+def process_service(service_config):
+ working_dir = service_config.working_dir
+ service_dict = dict(service_config.config)
+
+ if 'env_file' in service_dict:
+ service_dict['env_file'] = [
+ expand_path(working_dir, path)
+ for path in to_list(service_dict['env_file'])
+ ]
+
+ if 'build' in service_dict:
+ process_build_section(service_dict, working_dir)
+
+ if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
+ service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)
+
+ if 'sysctls' in service_dict:
+ service_dict['sysctls'] = build_string_dict(parse_sysctls(service_dict['sysctls']))
+
+ if 'labels' in service_dict:
+ service_dict['labels'] = parse_labels(service_dict['labels'])
+
+ service_dict = process_depends_on(service_dict)
+
+ for field in ['dns', 'dns_search', 'tmpfs']:
+ if field in service_dict:
+ service_dict[field] = to_list(service_dict[field])
+
+ service_dict = process_blkio_config(process_ports(
+ process_healthcheck(service_dict, service_config.name)
+ ))
+
+ return service_dict
+
+
+def process_build_section(service_dict, working_dir):
+ if isinstance(service_dict['build'], six.string_types):
+ service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
+ elif isinstance(service_dict['build'], dict):
+ if 'context' in service_dict['build']:
+ path = service_dict['build']['context']
+ service_dict['build']['context'] = resolve_build_path(working_dir, path)
+ if 'labels' in service_dict['build']:
+ service_dict['build']['labels'] = parse_labels(service_dict['build']['labels'])
+
+
+def process_ports(service_dict):
+ if 'ports' not in service_dict:
+ return service_dict
+
+ ports = []
+ for port_definition in service_dict['ports']:
+ if isinstance(port_definition, ServicePort):
+ ports.append(port_definition)
+ else:
+ ports.extend(ServicePort.parse(port_definition))
+ service_dict['ports'] = ports
+ return service_dict
+
+
+def process_depends_on(service_dict):
+ if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict):
+ service_dict['depends_on'] = dict([
+ (svc, {'condition': 'service_started'}) for svc in service_dict['depends_on']
+ ])
+ return service_dict
+
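+# For example, a shorthand list such as `depends_on: [db, redis]` is
+# normalized to {'db': {'condition': 'service_started'},
+# 'redis': {'condition': 'service_started'}}.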
+
+def process_blkio_config(service_dict):
+ if not service_dict.get('blkio_config'):
+ return service_dict
+
+ for field in ['device_read_bps', 'device_write_bps']:
+ if field in service_dict['blkio_config']:
+ for v in service_dict['blkio_config'].get(field, []):
+ rate = v.get('rate', 0)
+ v['rate'] = parse_bytes(rate)
+ if v['rate'] is None:
+ raise ConfigurationError('Invalid format for bytes value: "{}"'.format(rate))
+
+ for field in ['device_read_iops', 'device_write_iops']:
+ if field in service_dict['blkio_config']:
+ for v in service_dict['blkio_config'].get(field, []):
+ try:
+ v['rate'] = int(v.get('rate', 0))
+ except ValueError:
+ raise ConfigurationError(
+ 'Invalid IOPS value: "{}". Must be a positive integer.'.format(v.get('rate'))
+ )
+
+ return service_dict
+
+
+def process_healthcheck(service_dict, service_name):
+ if 'healthcheck' not in service_dict:
+ return service_dict
+
+ hc = {}
+ raw = service_dict['healthcheck']
+
+ if raw.get('disable'):
+ if len(raw) > 1:
+ raise ConfigurationError(
+ 'Service "{}" defines an invalid healthcheck: '
+ '"disable: true" cannot be combined with other options'
+ .format(service_name))
+ hc['test'] = ['NONE']
+ elif 'test' in raw:
+ hc['test'] = raw['test']
+
+ for field in ['interval', 'timeout', 'start_period']:
+ if field in raw:
+ if not isinstance(raw[field], six.integer_types):
+ hc[field] = parse_nanoseconds_int(raw[field])
+ else: # Conversion has been done previously
+ hc[field] = raw[field]
+ if 'retries' in raw:
+ hc['retries'] = raw['retries']
+
+ service_dict['healthcheck'] = hc
+ return service_dict
+
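+# For example, {'test': ['CMD', 'true'], 'interval': '30s'} has its
+# interval converted to nanoseconds (30000000000), while
+# {'disable': True} normalizes to {'test': ['NONE']}.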
+
+def finalize_service(service_config, service_names, version, environment):
+ service_dict = dict(service_config.config)
+
+ if 'environment' in service_dict or 'env_file' in service_dict:
+ service_dict['environment'] = resolve_environment(service_dict, environment)
+ service_dict.pop('env_file', None)
+
+ if 'volumes_from' in service_dict:
+ service_dict['volumes_from'] = [
+ VolumeFromSpec.parse(vf, service_names, version)
+ for vf in service_dict['volumes_from']
+ ]
+
+ if 'volumes' in service_dict:
+ service_dict['volumes'] = [
+ VolumeSpec.parse(
+ v, environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
+ ) for v in service_dict['volumes']
+ ]
+
+ if 'net' in service_dict:
+ network_mode = service_dict.pop('net')
+ container_name = get_container_name_from_network_mode(network_mode)
+ if container_name and container_name in service_names:
+ service_dict['network_mode'] = 'service:{}'.format(container_name)
+ else:
+ service_dict['network_mode'] = network_mode
+
+ if 'networks' in service_dict:
+ service_dict['networks'] = parse_networks(service_dict['networks'])
+
+ if 'restart' in service_dict:
+ service_dict['restart'] = parse_restart_spec(service_dict['restart'])
+
+ if 'secrets' in service_dict:
+ service_dict['secrets'] = [
+ types.ServiceSecret.parse(s) for s in service_dict['secrets']
+ ]
+
+ if 'configs' in service_dict:
+ service_dict['configs'] = [
+ types.ServiceConfig.parse(c) for c in service_dict['configs']
+ ]
+
+ normalize_build(service_dict, service_config.working_dir, environment)
+
+ service_dict['name'] = service_config.name
+ return normalize_v1_service_format(service_dict)
+
+
+def normalize_v1_service_format(service_dict):
+ if 'log_driver' in service_dict or 'log_opt' in service_dict:
+ if 'logging' not in service_dict:
+ service_dict['logging'] = {}
+ if 'log_driver' in service_dict:
+ service_dict['logging']['driver'] = service_dict['log_driver']
+ del service_dict['log_driver']
+ if 'log_opt' in service_dict:
+ service_dict['logging']['options'] = service_dict['log_opt']
+ del service_dict['log_opt']
+
+ if 'dockerfile' in service_dict:
+ service_dict['build'] = service_dict.get('build', {})
+ service_dict['build'].update({
+ 'dockerfile': service_dict.pop('dockerfile')
+ })
+
+ return service_dict
+
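+# For example, the v1 keys {'log_driver': 'syslog', 'dockerfile': 'Dockerfile.dev'}
+# are rewritten to {'logging': {'driver': 'syslog'},
+# 'build': {'dockerfile': 'Dockerfile.dev'}}.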
+
+def merge_service_dicts_from_files(base, override, version):
+ """When merging services from multiple files we need to merge the `extends`
+ field. This is not handled by `merge_service_dicts()` which is used to
+ perform the `extends`.
+ """
+ new_service = merge_service_dicts(base, override, version)
+ if 'extends' in override:
+ new_service['extends'] = override['extends']
+ elif 'extends' in base:
+ new_service['extends'] = base['extends']
+ return new_service
+
+
+class MergeDict(dict):
+ """A dict-like object responsible for merging two dicts into one."""
+
+ def __init__(self, base, override):
+ self.base = base
+ self.override = override
+
+ def needs_merge(self, field):
+ return field in self.base or field in self.override
+
+ def merge_field(self, field, merge_func, default=None):
+ if not self.needs_merge(field):
+ return
+
+ self[field] = merge_func(
+ self.base.get(field, default),
+ self.override.get(field, default))
+
+ def merge_mapping(self, field, parse_func):
+ if not self.needs_merge(field):
+ return
+
+ self[field] = parse_func(self.base.get(field))
+ self[field].update(parse_func(self.override.get(field)))
+
+ def merge_sequence(self, field, parse_func):
+ def parse_sequence_func(seq):
+ return to_mapping((parse_func(item) for item in seq), 'merge_field')
+
+ if not self.needs_merge(field):
+ return
+
+ merged = parse_sequence_func(self.base.get(field, []))
+ merged.update(parse_sequence_func(self.override.get(field, [])))
+ self[field] = [item.repr() for item in sorted(merged.values())]
+
+ def merge_scalar(self, field):
+ if self.needs_merge(field):
+ self[field] = self.override.get(field, self.base.get(field))
+
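+# Rough sketch of how MergeDict is used below:
+#
+#     md = MergeDict({'image': 'a', 'labels': ['x=1']},
+#                    {'labels': ['y=2']})
+#     md.merge_scalar('image')                  # keeps 'a' (no override)
+#     md.merge_mapping('labels', parse_labels)  # {'x': '1', 'y': '2'}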
+
+def merge_service_dicts(base, override, version):
+ md = MergeDict(base, override)
+
+ md.merge_mapping('environment', parse_environment)
+ md.merge_mapping('labels', parse_labels)
+ md.merge_mapping('ulimits', parse_flat_dict)
+ md.merge_mapping('networks', parse_networks)
+ md.merge_mapping('sysctls', parse_sysctls)
+ md.merge_mapping('depends_on', parse_depends_on)
+ md.merge_sequence('links', ServiceLink.parse)
+ md.merge_sequence('secrets', types.ServiceSecret.parse)
+ md.merge_sequence('configs', types.ServiceConfig.parse)
+ md.merge_mapping('deploy', parse_deploy)
+ md.merge_mapping('extra_hosts', parse_extra_hosts)
+
+ for field in ['volumes', 'devices']:
+ md.merge_field(field, merge_path_mappings)
+
+ for field in [
+ 'cap_add', 'cap_drop', 'expose', 'external_links',
+ 'security_opt', 'volumes_from',
+ ]:
+ md.merge_field(field, merge_unique_items_lists, default=[])
+
+ for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
+ md.merge_field(field, merge_list_or_string)
+
+ md.merge_field('logging', merge_logging, default={})
+ merge_ports(md, base, override)
+ md.merge_field('blkio_config', merge_blkio_config, default={})
+ md.merge_field('healthcheck', merge_healthchecks, default={})
+
+ for field in set(ALLOWED_KEYS) - set(md):
+ md.merge_scalar(field)
+
+ if version == V1:
+ legacy_v1_merge_image_or_build(md, base, override)
+ elif md.needs_merge('build'):
+ md['build'] = merge_build(md, base, override)
+
+ return dict(md)
+
+
+def merge_unique_items_lists(base, override):
+ override = [str(o) for o in override]
+ base = [str(b) for b in base]
+ return sorted(set().union(base, override))
+
+
+def merge_healthchecks(base, override):
+    # The compose file key is 'disable' (see process_healthcheck), not
+    # 'disabled'.
+    if override.get('disable') is True:
+ return override
+ result = base.copy()
+ result.update(override)
+ return result
+
+
+def merge_ports(md, base, override):
+ def parse_sequence_func(seq):
+ acc = []
+ for item in seq:
+ acc.extend(ServicePort.parse(item))
+ return to_mapping(acc, 'merge_field')
+
+ field = 'ports'
+
+ if not md.needs_merge(field):
+ return
+
+ merged = parse_sequence_func(md.base.get(field, []))
+ merged.update(parse_sequence_func(md.override.get(field, [])))
+    md[field] = sorted(merged.values(), key=lambda x: x.target)
+
+
+def merge_build(output, base, override):
+ def to_dict(service):
+ build_config = service.get('build', {})
+ if isinstance(build_config, six.string_types):
+ return {'context': build_config}
+ return build_config
+
+ md = MergeDict(to_dict(base), to_dict(override))
+ md.merge_scalar('context')
+ md.merge_scalar('dockerfile')
+ md.merge_scalar('network')
+ md.merge_scalar('target')
+ md.merge_scalar('shm_size')
+ md.merge_mapping('args', parse_build_arguments)
+ md.merge_field('cache_from', merge_unique_items_lists, default=[])
+ md.merge_mapping('labels', parse_labels)
+ return dict(md)
+
+
+def merge_blkio_config(base, override):
+ md = MergeDict(base, override)
+ md.merge_scalar('weight')
+
+ def merge_blkio_limits(base, override):
+ index = dict((b['path'], b) for b in base)
+ for o in override:
+ index[o['path']] = o
+
+ return sorted(list(index.values()), key=lambda x: x['path'])
+
+ for field in [
+ "device_read_bps", "device_read_iops", "device_write_bps",
+ "device_write_iops", "weight_device",
+ ]:
+ md.merge_field(field, merge_blkio_limits, default=[])
+
+ return dict(md)
+
+
+def merge_logging(base, override):
+ md = MergeDict(base, override)
+ md.merge_scalar('driver')
+ if md.get('driver') == base.get('driver') or base.get('driver') is None:
+ md.merge_mapping('options', lambda m: m or {})
+ elif override.get('options'):
+ md['options'] = override.get('options', {})
+ return dict(md)
+
+
+def legacy_v1_merge_image_or_build(output, base, override):
+ output.pop('image', None)
+ output.pop('build', None)
+ if 'image' in override:
+ output['image'] = override['image']
+ elif 'build' in override:
+ output['build'] = override['build']
+ elif 'image' in base:
+ output['image'] = base['image']
+ elif 'build' in base:
+ output['build'] = base['build']
+
+
+def merge_environment(base, override):
+ env = parse_environment(base)
+ env.update(parse_environment(override))
+ return env
+
+
+def split_kv(kvpair):
+ if '=' in kvpair:
+ return kvpair.split('=', 1)
+ else:
+ return kvpair, ''
+
+
+def parse_dict_or_list(split_func, type_name, arguments):
+ if not arguments:
+ return {}
+
+ if isinstance(arguments, list):
+ return dict(split_func(e) for e in arguments)
+
+ if isinstance(arguments, dict):
+ return dict(arguments)
+
+ raise ConfigurationError(
+        "%s \"%s\" must be a list or mapping." %
+ (type_name, arguments)
+ )
+
+
+parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
+parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
+parse_labels = functools.partial(parse_dict_or_list, split_kv, 'labels')
+parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
+parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
+parse_depends_on = functools.partial(
+ parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
+)
+parse_deploy = functools.partial(parse_dict_or_list, split_kv, 'deploy')
+
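+# e.g. parse_environment(['FOO=bar', 'BAZ']) returns
+# {'FOO': 'bar', 'BAZ': None}, and parse_labels({'a': 'b'}) returns a
+# copy of the mapping unchanged.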
+
+def parse_flat_dict(d):
+ if not d:
+ return {}
+
+ if isinstance(d, dict):
+ return dict(d)
+
+ raise ConfigurationError("Invalid type: expected mapping")
+
+
+def resolve_env_var(key, val, environment):
+ if val is not None:
+ return key, val
+ elif environment and key in environment:
+ return key, environment[key]
+ else:
+ return key, None
+
+
+def resolve_volume_paths(working_dir, service_dict):
+ return [
+ resolve_volume_path(working_dir, volume)
+ for volume in service_dict['volumes']
+ ]
+
+
+def resolve_volume_path(working_dir, volume):
+ mount_params = None
+ if isinstance(volume, dict):
+ container_path = volume.get('target')
+ host_path = volume.get('source')
+ mode = None
+ if host_path:
+ if volume.get('read_only'):
+ mode = 'ro'
+ if volume.get('volume', {}).get('nocopy'):
+ mode = 'nocopy'
+ mount_params = (host_path, mode)
+ else:
+ container_path, mount_params = split_path_mapping(volume)
+
+ if mount_params is not None:
+ host_path, mode = mount_params
+ if host_path is None:
+ return container_path
+ if host_path.startswith('.'):
+ host_path = expand_path(working_dir, host_path)
+ host_path = os.path.expanduser(host_path)
+ return u"{}:{}{}".format(host_path, container_path, (':' + mode if mode else ''))
+
+ return container_path
+
+
+def normalize_build(service_dict, working_dir, environment):
+
+ if 'build' in service_dict:
+ build = {}
+ # Shortcut where specifying a string is treated as the build context
+ if isinstance(service_dict['build'], six.string_types):
+ build['context'] = service_dict.pop('build')
+ else:
+ build.update(service_dict['build'])
+ if 'args' in build:
+ build['args'] = build_string_dict(
+ resolve_build_args(build.get('args'), environment)
+ )
+
+ service_dict['build'] = build
+
+
+def resolve_build_path(working_dir, build_path):
+ if is_url(build_path):
+ return build_path
+ return expand_path(working_dir, build_path)
+
+
+def is_url(build_path):
+ return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
+
+
+def validate_paths(service_dict):
+ if 'build' in service_dict:
+ build = service_dict.get('build', {})
+
+ if isinstance(build, six.string_types):
+ build_path = build
+ elif isinstance(build, dict) and 'context' in build:
+ build_path = build['context']
+ else:
+ # We have a build section but no context, so nothing to validate
+ return
+
+ if (
+ not is_url(build_path) and
+ (not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
+ ):
+ raise ConfigurationError(
+ "build path %s either does not exist, is not accessible, "
+ "or is not a valid URL." % build_path)
+
+
+def merge_path_mappings(base, override):
+ d = dict_from_path_mappings(base)
+ d.update(dict_from_path_mappings(override))
+ return path_mappings_from_dict(d)
+
+
+def dict_from_path_mappings(path_mappings):
+ if path_mappings:
+ return dict(split_path_mapping(v) for v in path_mappings)
+ else:
+ return {}
+
+
+def path_mappings_from_dict(d):
+ return [join_path_mapping(v) for v in sorted(d.items())]
+
+
+def split_path_mapping(volume_path):
+ """
+ Ascertain if the volume_path contains a host path as well as a container
+ path. Using splitdrive so windows absolute paths won't cause issues with
+ splitting on ':'.
+ """
+ if isinstance(volume_path, dict):
+ return (volume_path.get('target'), volume_path)
+ drive, volume_config = splitdrive(volume_path)
+
+ if ':' in volume_config:
+ (host, container) = volume_config.split(':', 1)
+ container_drive, container_path = splitdrive(container)
+ mode = None
+ if ':' in container_path:
+ container_path, mode = container_path.rsplit(':', 1)
+
+ return (container_drive + container_path, (drive + host, mode))
+ else:
+ return (volume_path, None)
+
+
+def join_path_mapping(pair):
+ (container, host) = pair
+ if isinstance(host, dict):
+ return host
+ elif host is None:
+ return container
+ else:
+ host, mode = host
+ result = ":".join((host, container))
+ if mode:
+ result += ":" + mode
+ return result
+
+
+def expand_path(working_dir, path):
+ return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))
+
+
+def merge_list_or_string(base, override):
+ return to_list(base) + to_list(override)
+
+
+def to_list(value):
+ if value is None:
+ return []
+ elif isinstance(value, six.string_types):
+ return [value]
+ else:
+ return value
+
+
+def to_mapping(sequence, key_field):
+ return {getattr(item, key_field): item for item in sequence}
+
+
+def has_uppercase(name):
+ return any(char in string.ascii_uppercase for char in name)
+
+
+def load_yaml(filename):
+ try:
+ with open(filename, 'r') as fh:
+ return yaml.safe_load(fh)
+ except (IOError, yaml.YAMLError) as e:
+ error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
+ raise ConfigurationError(u"{}: {}".format(error_name, e))
diff --git a/compose/config/config_schema_v1.json b/compose/config/config_schema_v1.json
new file mode 100644
index 00000000..94354cda
--- /dev/null
+++ b/compose/config/config_schema_v1.json
@@ -0,0 +1,188 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v1.json",
+
+ "type": "object",
+
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "build": {"type": "string"},
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "dockerfile": {"type": "string"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "log_driver": {"type": "string"},
+ "log_opt": {"type": "object"},
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "memswap_limit": {"type": ["number", "string"]},
+ "mem_swappiness": {"type": "integer"},
+ "net": {"type": "string"},
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_signal": {"type": "string"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+            "type": "object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {
+ "required": ["build"],
+ "not": {"required": ["image"]}
+ },
+ {
+ "required": ["image"],
+ "not": {"anyOf": [
+ {"required": ["build"]},
+ {"required": ["dockerfile"]}
+ ]}
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v2.0.json b/compose/config/config_schema_v2.0.json
new file mode 100644
index 00000000..2ad62ac5
--- /dev/null
+++ b/compose/config/config_schema_v2.0.json
@@ -0,0 +1,389 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.0.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
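
Note how the v2.0 "constraints/service" relaxes the v1 rule: "build" and "image" may now coexist (the built image is tagged with the given name), and an object-form "build" must supply a "context". Because draft-04 keywords constrain only instances of their matching type, the nested "required": ["context"] bites only when "build" is an object; a plain string build path passes untouched. A minimal sketch under the same assumptions as above:

    import json
    from jsonschema import Draft4Validator

    schema = json.load(open("compose/config/config_schema_v2.0.json"))
    constraint = schema["definitions"]["constraints"]["service"]

    # Both keys together are valid in v2.0, unlike v1.
    svc = {"build": {"context": "."}, "image": "myapp:latest"}
    print(Draft4Validator(constraint).is_valid(svc))  # True
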
diff --git a/compose/config/config_schema_v2.1.json b/compose/config/config_schema_v2.1.json
new file mode 100644
index 00000000..24e6ba02
--- /dev/null
+++ b/compose/config/config_schema_v2.1.json
@@ -0,0 +1,441 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.1.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
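
The headline additions in v2.1 are the long-form "depends_on" (with "service_started"/"service_healthy" conditions) and the "healthcheck" mapping it pairs with. Since every "$ref" in these schemas points inside the same document, a whole config mapping can be checked without extra resolver setup; a hedged end-to-end sketch, again assuming the file path from this diff:

    import json
    from jsonschema import Draft4Validator

    schema = json.load(open("compose/config/config_schema_v2.1.json"))
    config = {
        "version": "2.1",
        "services": {
            "web": {
                "build": ".",
                # Long-form depends_on, new in v2.1.
                "depends_on": {"db": {"condition": "service_healthy"}},
            },
            "db": {
                "image": "postgres",
                "healthcheck": {"test": "pg_isready -U postgres", "interval": "5s"},
            },
        },
    }
    print(Draft4Validator(schema).is_valid(config))  # True
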
diff --git a/compose/config/config_schema_v2.2.json b/compose/config/config_schema_v2.2.json
new file mode 100644
index 00000000..86fc5df9
--- /dev/null
+++ b/compose/config/config_schema_v2.2.json
@@ -0,0 +1,448 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.2.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
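
v2.2 grows the service definition with CPU controls ("cpu_count", "cpu_percent", "cpus"), plus "init", "scale", and the build-time "cache_from" and "network" keys. A short sketch of a config exercising them, under the same validation assumptions as the earlier sketches:

    import json
    from jsonschema import Draft4Validator

    schema = json.load(open("compose/config/config_schema_v2.2.json"))
    config = {
        "version": "2.2",
        "services": {
            "app": {
                "build": {"context": ".", "cache_from": ["myapp:ci"], "network": "host"},
                "init": True,   # boolean or string per the schema
                "cpus": 0.5,    # number, minimum 0
                "scale": 3,     # integer
            }
        },
    }
    print(Draft4Validator(schema).is_valid(config))  # True
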
diff --git a/compose/config/config_schema_v2.3.json b/compose/config/config_schema_v2.3.json
new file mode 100644
index 00000000..ceaf4495
--- /dev/null
+++ b/compose/config/config_schema_v2.3.json
@@ -0,0 +1,451 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.3.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "start_period": {"type": "string"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
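
Because the service definition sets "additionalProperties": false, misspelled options surface as validation errors instead of being silently ignored. A sketch using iter_errors (the exact message text depends on the installed jsonschema version):

    import json
    from jsonschema import Draft4Validator

    schema = json.load(open("compose/config/config_schema_v2.3.json"))
    config = {
        "version": "2.3",
        "services": {"web": {"image": "nginx", "helthcheck": {"test": "true"}}},
    }
    for error in Draft4Validator(schema).iter_errors(config):
        print(error.message)
    # e.g. "Additional properties are not allowed ('helthcheck' was unexpected)"
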
diff --git a/compose/config/config_schema_v3.0.json b/compose/config/config_schema_v3.0.json
new file mode 100644
index 00000000..f39344cf
--- /dev/null
+++ b/compose/config/config_schema_v3.0.json
@@ -0,0 +1,384 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.0.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
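
The v3.0 schema is the first to require the "version" key, and it trades single-host runtime keys such as "mem_limit" and "cpu_shares" for the swarm-oriented "deploy" mapping ("mode", "replicas", "update_config", "resources", "restart_policy", "placement"). A minimal sketch of a deploy section that validates, same assumptions as above:

    import json
    from jsonschema import Draft4Validator

    schema = json.load(open("compose/config/config_schema_v3.0.json"))
    config = {
        "version": "3",
        "services": {
            "web": {
                "image": "nginx",
                "deploy": {
                    "mode": "replicated",
                    "replicas": 2,
                    "restart_policy": {"condition": "on-failure", "max_attempts": 3},
                    "resources": {"limits": {"cpus": "0.50", "memory": "256M"}},
                },
            }
        },
    }
    print(Draft4Validator(schema).is_valid(config))  # True
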
diff --git a/compose/config/config_schema_v3.1.json b/compose/config/config_schema_v3.1.json
new file mode 100644
index 00000000..719c0fa7
--- /dev/null
+++ b/compose/config/config_schema_v3.1.json
@@ -0,0 +1,429 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.1.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
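
v3.1's addition over v3.0 is secrets: a top-level "secrets" section plus a per-service "secrets" list that accepts either a bare secret name or a long object form. A hedged sketch:

    import json
    from jsonschema import Draft4Validator

    schema = json.load(open("compose/config/config_schema_v3.1.json"))
    config = {
        "version": "3.1",
        "secrets": {"db_password": {"file": "./db_password.txt"}},
        "services": {
            "db": {
                "image": "postgres",
                "secrets": [
                    "db_password",  # short form
                    {"source": "db_password", "target": "pgpass", "mode": 0o400},
                ],
            }
        },
    }
    print(Draft4Validator(schema).is_valid(config))  # True
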
diff --git a/compose/config/config_schema_v3.2.json b/compose/config/config_schema_v3.2.json
new file mode 100644
index 00000000..2ca8e92d
--- /dev/null
+++ b/compose/config/config_schema_v3.2.json
@@ -0,0 +1,476 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.2.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
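config_schema_v3.2.json is the first revision here that accepts the long port syntax (an object with mode/target/published/protocol) alongside plain strings and numbers. A hedged sketch of the item schema in isolation; the "ports" format annotations are omitted because their checkers are registered separately by Compose (in compose/config/validation.py), and the sample values are invented:

    import jsonschema

    ports_item = {
        "oneOf": [
            {"type": "number"},
            {"type": "string"},
            {
                "type": "object",
                "properties": {
                    "mode": {"type": "string"},
                    "target": {"type": "integer"},
                    "published": {"type": "integer"},
                    "protocol": {"type": "string"},
                },
                "additionalProperties": False,
            },
        ]
    }

    # The short form and the long form both validate against the same item.
    short_form = "8080:80"
    long_form = {"target": 80, "published": 8080, "protocol": "tcp", "mode": "host"}
    for sample in (short_form, long_form):
        jsonschema.validate(instance=sample, schema=ports_item)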
diff --git a/compose/config/config_schema_v3.3.json b/compose/config/config_schema_v3.3.json
new file mode 100644
index 00000000..f1eb9a66
--- /dev/null
+++ b/compose/config/config_schema_v3.3.json
@@ -0,0 +1,535 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.3.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
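v3.3 introduces the top-level `configs` section plus the service-level `configs` list above, mirroring the secrets syntax: each entry is either a bare name or an object choosing the mount target and ownership. A minimal sketch; the config name and target path are made up:

    import jsonschema

    service_configs_item = {
        "oneOf": [
            {"type": "string"},
            {
                "type": "object",
                "properties": {
                    "source": {"type": "string"},
                    "target": {"type": "string"},
                    "uid": {"type": "string"},
                    "gid": {"type": "string"},
                    "mode": {"type": "number"},
                },
            },
        ]
    }

    # The short form references a top-level config by name; the long form
    # also controls where and with which permissions it is mounted.
    for sample in ("app_config",
                   {"source": "app_config", "target": "/etc/app.conf", "mode": 0o444}):
        jsonschema.validate(instance=sample, schema=service_configs_item)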
diff --git a/compose/config/config_schema_v3.4.json b/compose/config/config_schema_v3.4.json
new file mode 100644
index 00000000..dae7d7d2
--- /dev/null
+++ b/compose/config/config_schema_v3.4.json
@@ -0,0 +1,544 @@
+
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.4.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string", "format": "duration"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
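v3.4 is the first revision in this import to pair `"additionalProperties": false` with `"patternProperties": {"^x-": {}}` at the top level, so user-defined extension fields prefixed with `x-` pass validation while any other stray key is still rejected. A small sketch of just that pairing; the field names are invented:

    import jsonschema

    top_level = {
        "type": "object",
        "required": ["version"],
        "properties": {"version": {"type": "string"}},
        "patternProperties": {"^x-": {}},   # empty schema: any value allowed
        "additionalProperties": False,
    }

    # An "x-" key may hold arbitrary user data.
    jsonschema.validate(
        instance={"version": "3.4", "x-site-defaults": {"replicas": 3}},
        schema=top_level,
    )

    # Any other unknown top-level key still fails validation.
    try:
        jsonschema.validate(instance={"version": "3.4", "stray": 1}, schema=top_level)
    except jsonschema.ValidationError as exc:
        print(exc.message)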
diff --git a/compose/config/config_schema_v3.5.json b/compose/config/config_schema_v3.5.json
new file mode 100644
index 00000000..fa95d6a2
--- /dev/null
+++ b/compose/config/config_schema_v3.5.json
@@ -0,0 +1,542 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.5.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
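Relative to v3.4, the main addition in this v3.5 schema is `shm_size` under `build`, typed as integer or string so that both a raw byte count and a unit suffix pass validation; parsing of the unit string happens later in Compose, outside the schema. (Note that, unlike v3.4 above, this revision carries no top-level `^x-` patternProperties.) A two-line sketch with illustrative values:

    import jsonschema

    shm_size = {"type": ["integer", "string"]}   # as in the build definition above
    for sample in (67108864, "64M"):             # a byte count, or a unit string
        jsonschema.validate(instance=sample, schema=shm_size)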
diff --git a/compose/config/environment.py b/compose/config/environment.py
new file mode 100644
index 00000000..4ba228c8
--- /dev/null
+++ b/compose/config/environment.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import codecs
+import contextlib
+import logging
+import os
+
+import six
+
+from ..const import IS_WINDOWS_PLATFORM
+from .errors import ConfigurationError
+
+log = logging.getLogger(__name__)
+
+
+def split_env(env):
+ if isinstance(env, six.binary_type):
+ env = env.decode('utf-8', 'replace')
+ if '=' in env:
+ return env.split('=', 1)
+ else:
+ return env, None
+
+
+def env_vars_from_file(filename):
+ """
+ Read in a line-delimited file of environment variables.
+ """
+ if not os.path.exists(filename):
+ raise ConfigurationError("Couldn't find env file: %s" % filename)
+ elif not os.path.isfile(filename):
+ raise ConfigurationError("%s is not a file." % (filename))
+ env = {}
+ with contextlib.closing(codecs.open(filename, 'r', 'utf-8')) as fileobj:
+ for line in fileobj:
+ line = line.strip()
+ if line and not line.startswith('#'):
+ k, v = split_env(line)
+ env[k] = v
+ return env
+
+
+class Environment(dict):
+ def __init__(self, *args, **kwargs):
+ super(Environment, self).__init__(*args, **kwargs)
+ self.missing_keys = []
+
+ @classmethod
+ def from_env_file(cls, base_dir):
+ def _initialize():
+ result = cls()
+ if base_dir is None:
+ return result
+ env_file_path = os.path.join(base_dir, '.env')
+ try:
+ return cls(env_vars_from_file(env_file_path))
+ except ConfigurationError:
+ pass
+ return result
+ instance = _initialize()
+ instance.update(os.environ)
+ return instance
+
+ @classmethod
+ def from_command_line(cls, parsed_env_opts):
+ result = cls()
+ for k, v in parsed_env_opts.items():
+ # Values from the command line take priority, unless they're unset,
+ # in which case they take the value from the system's environment.
+ if v is None and k in os.environ:
+ result[k] = os.environ[k]
+ else:
+ result[k] = v
+ return result
+
+ def __getitem__(self, key):
+ try:
+ return super(Environment, self).__getitem__(key)
+ except KeyError:
+ if IS_WINDOWS_PLATFORM:
+ try:
+ return super(Environment, self).__getitem__(key.upper())
+ except KeyError:
+ pass
+ if key not in self.missing_keys:
+ log.warning(
+ "The {} variable is not set. Defaulting to a blank string."
+ .format(key)
+ )
+ self.missing_keys.append(key)
+
+ return ""
+
+ def __contains__(self, key):
+ result = super(Environment, self).__contains__(key)
+ if IS_WINDOWS_PLATFORM:
+ return (
+ result or super(Environment, self).__contains__(key.upper())
+ )
+ return result
+
+ def get(self, key, *args, **kwargs):
+ if IS_WINDOWS_PLATFORM:
+ return super(Environment, self).get(
+ key,
+ super(Environment, self).get(key.upper(), *args, **kwargs)
+ )
+ return super(Environment, self).get(key, *args, **kwargs)
+
+ def get_boolean(self, key):
+ # Convert a value to a boolean using "common sense" rules.
+ # Unset, empty, "0" and "false" (i-case) yield False.
+ # All other values yield True.
+ value = self.get(key)
+ if not value:
+ return False
+ if value.lower() in ['0', 'false']:
+ return False
+ return True
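A short usage sketch of the module above; the project path and variable names are invented, and the annotated behaviour follows the code as written (from_env_file applies os.environ last, so the process environment wins over the .env file):

    from compose.config.environment import Environment

    # Loads KEY=value lines from <base_dir>/.env, then overlays os.environ.
    env = Environment.from_env_file('/path/to/project')   # illustrative path

    env['PATH']                # present: comes from the os.environ overlay
    env['NEVER_SET_VAR']       # "" -- plus a warning logged once per key
    env.get_boolean('DEBUG')   # False for unset, empty, "0" or "false"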
diff --git a/compose/config/errors.py b/compose/config/errors.py
new file mode 100644
index 00000000..f5c03808
--- /dev/null
+++ b/compose/config/errors.py
@@ -0,0 +1,55 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+
+VERSION_EXPLANATION = (
+ 'You might be seeing this error because you\'re using the wrong Compose file version. '
+ 'Either specify a supported version (e.g. "2.2" or "3.3") and place '
+ 'your service definitions under the `services` key, or omit the `version` key '
+ 'and place your service definitions at the root of the file to use '
+ 'version 1.\nFor more on the Compose file format versions, see '
+ 'https://docs.docker.com/compose/compose-file/')
+
+
+class ConfigurationError(Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class DependencyError(ConfigurationError):
+ pass
+
+
+class CircularReference(ConfigurationError):
+ def __init__(self, trail):
+ self.trail = trail
+
+ @property
+ def msg(self):
+ lines = [
+ "{} in {}".format(service_name, filename)
+ for (filename, service_name) in self.trail
+ ]
+ return "Circular reference:\n {}".format("\n extends ".join(lines))
+
+
+class ComposeFileNotFound(ConfigurationError):
+ def __init__(self, supported_filenames):
+ super(ComposeFileNotFound, self).__init__("""
+ Can't find a suitable configuration file in this directory or any
+ parent. Are you in the right directory?
+
+ Supported filenames: %s
+ """ % ", ".join(supported_filenames))
+
+
+class DuplicateOverrideFileFound(ConfigurationError):
+ def __init__(self, override_filenames):
+ self.override_filenames = override_filenames
+ super(DuplicateOverrideFileFound, self).__init__(
+ "Multiple override files found: {}. You may only use a single "
+ "override file.".format(", ".join(override_filenames))
+ )
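For reference, CircularReference renders its trail of (filename, service) pairs into an `extends` chain via the msg property above. A tiny sketch with invented file and service names:

    from compose.config.errors import CircularReference

    err = CircularReference([
        ('docker-compose.yml', 'web'),
        ('common.yml', 'base'),
    ])
    print(err.msg)
    # Circular reference:
    #   web in docker-compose.yml
    #   extends base in common.yml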
diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py
new file mode 100644
index 00000000..b13ac591
--- /dev/null
+++ b/compose/config/interpolation.py
@@ -0,0 +1,102 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+from string import Template
+
+import six
+
+from .errors import ConfigurationError
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
+log = logging.getLogger(__name__)
+
+
+class Interpolator(object):
+
+ def __init__(self, templater, mapping):
+ self.templater = templater
+ self.mapping = mapping
+
+ def interpolate(self, string):
+ try:
+ return self.templater(string).substitute(self.mapping)
+ except ValueError:
+ raise InvalidInterpolation(string)
+
+
+def interpolate_environment_variables(version, config, section, environment):
+ if version <= V2_0:
+ interpolator = Interpolator(Template, environment)
+ else:
+ interpolator = Interpolator(TemplateWithDefaults, environment)
+
+ def process_item(name, config_dict):
+ return dict(
+ (key, interpolate_value(name, key, val, section, interpolator))
+ for key, val in (config_dict or {}).items()
+ )
+
+ return dict(
+ (name, process_item(name, config_dict or {}))
+ for name, config_dict in config.items()
+ )
+
+
+def interpolate_value(name, config_key, value, section, interpolator):
+ try:
+ return recursive_interpolate(value, interpolator)
+ except InvalidInterpolation as e:
+ raise ConfigurationError(
+ 'Invalid interpolation format for "{config_key}" option '
+ 'in {section} "{name}": "{string}"'.format(
+ config_key=config_key,
+ name=name,
+ section=section,
+ string=e.string))
+
+
+def recursive_interpolate(obj, interpolator):
+ if isinstance(obj, six.string_types):
+ return interpolator.interpolate(obj)
+ if isinstance(obj, dict):
+ return dict(
+ (key, recursive_interpolate(val, interpolator))
+ for (key, val) in obj.items()
+ )
+ if isinstance(obj, list):
+ return [recursive_interpolate(val, interpolator) for val in obj]
+ return obj
+
+
+class TemplateWithDefaults(Template):
+ idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]+)?'
+
+ # Modified from python2.7/string.py
+ def substitute(self, mapping):
+ # Helper function for .sub()
+ def convert(mo):
+ # Check the most common path first.
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ if ':-' in named:
+ var, _, default = named.partition(':-')
+ return mapping.get(var) or default
+ if '-' in named:
+ var, _, default = named.partition('-')
+ return mapping.get(var, default)
+ val = mapping[named]
+ return '%s' % (val,)
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ self._invalid(mo)
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+
+class InvalidInterpolation(Exception):
+ def __init__(self, string):
+ self.string = string
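The `:-` and `-` default operators accepted by TemplateWithDefaults differ in how they treat a variable that is set but empty, which the substitute() override above implements as `mapping.get(var) or default` versus `mapping.get(var, default)`. A minimal sketch; the variable names and values are illustrative:

    from compose.config.interpolation import Interpolator, TemplateWithDefaults

    interpolator = Interpolator(TemplateWithDefaults, {'TAG': '', 'PORT': '8080'})

    interpolator.interpolate('redis:${TAG:-latest}')  # 'redis:latest' -- ':-' also covers empty
    interpolator.interpolate('redis:${TAG-latest}')   # 'redis:'       -- '-' only covers unset
    interpolator.interpolate('0.0.0.0:${PORT}')       # '0.0.0.0:8080' -- plain substitution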
diff --git a/compose/config/serialize.py b/compose/config/serialize.py
new file mode 100644
index 00000000..2b8c73f1
--- /dev/null
+++ b/compose/config/serialize.py
@@ -0,0 +1,145 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import six
+import yaml
+
+from compose.config import types
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_4 as V3_4
+
+
+def serialize_config_type(dumper, data):
+ representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
+ return representer(data.repr())
+
+
+def serialize_dict_type(dumper, data):
+ return dumper.represent_dict(data.repr())
+
+
+def serialize_string(dumper, data):
+ """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
+
+ data = data.replace('$', '$$')
+
+ if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
+ # Empirically only y/n appears to be an issue, but this might change
+ # depending on which PyYAML version is being used. Err on the safe side.
+ return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
+ return representer(data)
+
+
+yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
+yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
+yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
+yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
+yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
+yaml.SafeDumper.add_representer(str, serialize_string)
+yaml.SafeDumper.add_representer(six.text_type, serialize_string)
+
+
+def denormalize_config(config, image_digests=None):
+ result = {'version': str(V2_1) if config.version == V1 else str(config.version)}
+ denormalized_services = [
+ denormalize_service_dict(
+ service_dict,
+ config.version,
+ image_digests[service_dict['name']] if image_digests else None)
+ for service_dict in config.services
+ ]
+ result['services'] = {
+ service_dict.pop('name'): service_dict
+ for service_dict in denormalized_services
+ }
+
+ for key in ('networks', 'volumes', 'secrets', 'configs'):
+ config_dict = getattr(config, key)
+ if not config_dict:
+ continue
+ result[key] = config_dict.copy()
+ for name, conf in result[key].items():
+ if 'external_name' in conf:
+ del conf['external_name']
+
+ if 'name' in conf:
+ if config.version < V2_1 or (config.version >= V3_0 and config.version < V3_4):
+ del conf['name']
+ elif 'external' in conf:
+ conf['external'] = True
+
+ return result
+
+
+def serialize_config(config, image_digests=None):
+ return yaml.safe_dump(
+ denormalize_config(config, image_digests),
+ default_flow_style=False,
+ indent=2,
+ width=80
+ )
+
+
+def serialize_ns_time_value(value):
+ result = (value, 'ns')
+ table = [
+ (1000., 'us'),
+ (1000., 'ms'),
+ (1000., 's'),
+ (60., 'm'),
+ (60., 'h')
+ ]
+ for stage in table:
+ tmp = value / stage[0]
+ if tmp == int(value / stage[0]):
+ value = tmp
+ result = (int(value), stage[1])
+ else:
+ break
+ return '{0}{1}'.format(*result)
+
+
+def denormalize_service_dict(service_dict, version, image_digest=None):
+ service_dict = service_dict.copy()
+
+ if image_digest:
+ service_dict['image'] = image_digest
+
+ if 'restart' in service_dict:
+ service_dict['restart'] = types.serialize_restart_spec(
+ service_dict['restart']
+ )
+
+ if version == V1 and 'network_mode' not in service_dict:
+ service_dict['network_mode'] = 'bridge'
+
+ if 'depends_on' in service_dict and (version < V2_1 or version >= V3_0):
+ service_dict['depends_on'] = sorted([
+ svc for svc in service_dict['depends_on'].keys()
+ ])
+
+ if 'healthcheck' in service_dict:
+ if 'interval' in service_dict['healthcheck']:
+ service_dict['healthcheck']['interval'] = serialize_ns_time_value(
+ service_dict['healthcheck']['interval']
+ )
+ if 'timeout' in service_dict['healthcheck']:
+ service_dict['healthcheck']['timeout'] = serialize_ns_time_value(
+ service_dict['healthcheck']['timeout']
+ )
+
+ if 'start_period' in service_dict['healthcheck']:
+ service_dict['healthcheck']['start_period'] = serialize_ns_time_value(
+ service_dict['healthcheck']['start_period']
+ )
+ if 'ports' in service_dict and version < V3_2:
+ service_dict['ports'] = [
+ p.legacy_repr() if isinstance(p, types.ServicePort) else p
+ for p in service_dict['ports']
+ ]
+
+ return service_dict
diff --git a/compose/config/sort_services.py b/compose/config/sort_services.py
new file mode 100644
index 00000000..42f548a6
--- /dev/null
+++ b/compose/config/sort_services.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose.config.errors import DependencyError
+
+
+def get_service_name_from_network_mode(network_mode):
+ return get_source_name_from_network_mode(network_mode, 'service')
+
+
+def get_container_name_from_network_mode(network_mode):
+ return get_source_name_from_network_mode(network_mode, 'container')
+
+
+def get_source_name_from_network_mode(network_mode, source_type):
+ if not network_mode:
+ return
+
+ if not network_mode.startswith(source_type+':'):
+ return
+
+ _, net_name = network_mode.split(':', 1)
+ return net_name
+
+
+def get_service_names(links):
+ return [link.split(':')[0] for link in links]
+
+
+def get_service_names_from_volumes_from(volumes_from):
+ return [volume_from.source for volume_from in volumes_from]
+
+
+def get_service_dependents(service_dict, services):
+ name = service_dict['name']
+ return [
+ service for service in services
+ if (name in get_service_names(service.get('links', [])) or
+ name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
+ name == get_service_name_from_network_mode(service.get('network_mode')) or
+ name == get_service_name_from_network_mode(service.get('pid')) or
+ name in service.get('depends_on', []))
+ ]
+
+
+def sort_service_dicts(services):
+ # Topological sort (Cormen/Tarjan algorithm).
+ unmarked = services[:]
+ temporary_marked = set()
+ sorted_services = []
+
+ def visit(n):
+ if n['name'] in temporary_marked:
+ if n['name'] in get_service_names(n.get('links', [])):
+ raise DependencyError('A service cannot link to itself: %s' % n['name'])
+ if n['name'] in n.get('volumes_from', []):
+ raise DependencyError('A service cannot mount itself as a volume: %s' % n['name'])
+ if n['name'] in n.get('depends_on', []):
+ raise DependencyError('A service cannot depend on itself: %s' % n['name'])
+ raise DependencyError('Circular dependency between %s' % ' and '.join(temporary_marked))
+
+ if n in unmarked:
+ temporary_marked.add(n['name'])
+ for m in get_service_dependents(n, services):
+ visit(m)
+ temporary_marked.remove(n['name'])
+ unmarked.remove(n)
+ sorted_services.insert(0, n)
+
+ while unmarked:
+ visit(unmarked[-1])
+
+ return sorted_services
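+
+# Behaviour sketch: dependencies sort first, e.g.
+#   sort_service_dicts([{'name': 'web', 'links': ['db']}, {'name': 'db'}])
+#     -> [{'name': 'db'}, {'name': 'web', 'links': ['db']}]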
diff --git a/compose/config/types.py b/compose/config/types.py
new file mode 100644
index 00000000..c410343b
--- /dev/null
+++ b/compose/config/types.py
@@ -0,0 +1,351 @@
+"""
+Types for objects parsed from the configuration.
+"""
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import re
+from collections import namedtuple
+
+import six
+from docker.utils.ports import build_port_bindings
+
+from ..const import COMPOSEFILE_V1 as V1
+from .errors import ConfigurationError
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.utils import splitdrive
+
+win32_root_path_pattern = re.compile(r'^[A-Za-z]\:\\.*')
+
+
+class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
+
+ # TODO: drop service_names arg when v1 is removed
+ @classmethod
+ def parse(cls, volume_from_config, service_names, version):
+ func = cls.parse_v1 if version == V1 else cls.parse_v2
+ return func(service_names, volume_from_config)
+
+ @classmethod
+ def parse_v1(cls, service_names, volume_from_config):
+ parts = volume_from_config.split(':')
+ if len(parts) > 2:
+ raise ConfigurationError(
+ "volume_from {} has incorrect format, should be "
+ "service[:mode]".format(volume_from_config))
+
+ if len(parts) == 1:
+ source = parts[0]
+ mode = 'rw'
+ else:
+ source, mode = parts
+
+ type = 'service' if source in service_names else 'container'
+ return cls(source, mode, type)
+
+ @classmethod
+ def parse_v2(cls, service_names, volume_from_config):
+ parts = volume_from_config.split(':')
+ if len(parts) > 3:
+ raise ConfigurationError(
+ "volume_from {} has incorrect format, should be one of "
+ "'<service name>[:<mode>]' or "
+ "'container:<container name>[:<mode>]'".format(volume_from_config))
+
+ if len(parts) == 1:
+ source = parts[0]
+ return cls(source, 'rw', 'service')
+
+ if len(parts) == 2:
+ if parts[0] == 'container':
+ type, source = parts
+ return cls(source, 'rw', type)
+
+ source, mode = parts
+ return cls(source, mode, 'service')
+
+ if len(parts) == 3:
+ type, source, mode = parts
+ if type not in ('service', 'container'):
+ raise ConfigurationError(
+ "Unknown volumes_from type '{}' in '{}'".format(
+ type,
+ volume_from_config))
+
+ return cls(source, mode, type)
+
+ def repr(self):
+ return '{v.type}:{v.source}:{v.mode}'.format(v=self)
+
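+# Parsing sketch (v2-format values; 'web' and 'db' are hypothetical names):
+#   VolumeFromSpec.parse('web:ro', ['web'], version='2.0')
+#     -> VolumeFromSpec(source='web', mode='ro', type='service')
+#   VolumeFromSpec.parse('container:db:rw', ['web'], version='2.0')
+#     -> VolumeFromSpec(source='db', mode='rw', type='container')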
+
+def parse_restart_spec(restart_config):
+ if not restart_config:
+ return None
+ parts = restart_config.split(':')
+ if len(parts) > 2:
+ raise ConfigurationError(
+ "Restart %s has incorrect format, should be "
+ "mode[:max_retry]" % restart_config)
+ if len(parts) == 2:
+ name, max_retry_count = parts
+ else:
+ name, = parts
+ max_retry_count = 0
+
+ return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
+
+
+def serialize_restart_spec(restart_spec):
+ if not restart_spec:
+ return ''
+ parts = [restart_spec['Name']]
+ if restart_spec['MaximumRetryCount']:
+ parts.append(six.text_type(restart_spec['MaximumRetryCount']))
+ return ':'.join(parts)
+
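+# Round-trip sketch:
+#   parse_restart_spec('on-failure:5')
+#     -> {'Name': 'on-failure', 'MaximumRetryCount': 5}
+#   serialize_restart_spec({'Name': 'on-failure', 'MaximumRetryCount': 5})
+#     -> 'on-failure:5'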
+
+def parse_extra_hosts(extra_hosts_config):
+ if not extra_hosts_config:
+ return {}
+
+ if isinstance(extra_hosts_config, dict):
+ return dict(extra_hosts_config)
+
+ if isinstance(extra_hosts_config, list):
+ extra_hosts_dict = {}
+ for extra_hosts_line in extra_hosts_config:
+ # TODO: validate string contains ':' ?
+ host, ip = extra_hosts_line.split(':', 1)
+ extra_hosts_dict[host.strip()] = ip.strip()
+ return extra_hosts_dict
+
+
+def normalize_path_for_engine(path):
+ """Windows paths, c:\my\path\shiny, need to be changed to be compatible with
+ the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
+ """
+ drive, tail = splitdrive(path)
+
+ if drive:
+ path = '/' + drive.lower().rstrip(':') + tail
+
+ return path.replace('\\', '/')
+
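+# e.g. normalize_path_for_engine('C:\\Users\\app') -> '/c/Users/app'
+# (assuming splitdrive() splits off the 'C:' drive as ntpath.splitdrive does).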
+
+class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
+
+ @classmethod
+ def _parse_unix(cls, volume_config):
+ parts = volume_config.split(':')
+
+ if len(parts) > 3:
+ raise ConfigurationError(
+ "Volume %s has incorrect format, should be "
+ "external:internal[:mode]" % volume_config)
+
+ if len(parts) == 1:
+ external = None
+ internal = os.path.normpath(parts[0])
+ else:
+ external = os.path.normpath(parts[0])
+ internal = os.path.normpath(parts[1])
+
+ mode = 'rw'
+ if len(parts) == 3:
+ mode = parts[2]
+
+ return cls(external, internal, mode)
+
+ @classmethod
+ def _parse_win32(cls, volume_config, normalize):
+ # relative paths on Windows expand to include the drive, e.g. C:\
+ # so we join the first two parts back together to count as one
+ mode = 'rw'
+
+ def separate_next_section(volume_config):
+ drive, tail = splitdrive(volume_config)
+ parts = tail.split(':', 1)
+ if drive:
+ parts[0] = drive + parts[0]
+ return parts
+
+ parts = separate_next_section(volume_config)
+ if len(parts) == 1:
+ internal = parts[0]
+ external = None
+ else:
+ external = parts[0]
+ parts = separate_next_section(parts[1])
+ external = os.path.normpath(external)
+ internal = parts[0]
+ if len(parts) > 1:
+ if ':' in parts[1]:
+ raise ConfigurationError(
+ "Volume %s has incorrect format, should be "
+ "external:internal[:mode]" % volume_config
+ )
+ mode = parts[1]
+
+ if normalize:
+ external = normalize_path_for_engine(external) if external else None
+
+ return cls(external, internal, mode)
+
+ @classmethod
+ def parse(cls, volume_config, normalize=False):
+ """Parse a volume_config path and split it into external:internal[:mode]
+ parts to be returned as a valid VolumeSpec.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return cls._parse_win32(volume_config, normalize)
+ else:
+ return cls._parse_unix(volume_config)
+
+ def repr(self):
+ external = self.external + ':' if self.external else ''
+ mode = ':' + self.mode if self.external else ''
+ return '{ext}{v.internal}{mode}'.format(mode=mode, ext=external, v=self)
+
+ @property
+ def is_named_volume(self):
+ res = self.external and not self.external.startswith(('.', '/', '~'))
+ if not IS_WINDOWS_PLATFORM:
+ return res
+
+ return (
+ res and not self.external.startswith('\\') and
+ not win32_root_path_pattern.match(self.external)
+ )
+
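+# Parsing sketch (POSIX form):
+#   VolumeSpec.parse('/host/data:/data:ro')
+#     -> VolumeSpec(external='/host/data', internal='/data', mode='ro')
+#   VolumeSpec.parse('mydata:/data').is_named_volume -> True
+#   ('mydata' is not a path, so it refers to a named volume)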
+
+class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
+
+ @classmethod
+ def parse(cls, link_spec):
+ target, _, alias = link_spec.partition(':')
+ if not alias:
+ alias = target
+ return cls(target, alias)
+
+ def repr(self):
+ if self.target == self.alias:
+ return self.target
+ return '{s.target}:{s.alias}'.format(s=self)
+
+ @property
+ def merge_field(self):
+ return self.alias
+
+
+class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode')):
+ @classmethod
+ def parse(cls, spec):
+ if isinstance(spec, six.string_types):
+ return cls(spec, None, None, None, None)
+ return cls(
+ spec.get('source'),
+ spec.get('target'),
+ spec.get('uid'),
+ spec.get('gid'),
+ spec.get('mode'),
+ )
+
+ @property
+ def merge_field(self):
+ return self.source
+
+ def repr(self):
+ return dict(
+ [(k, v) for k, v in zip(self._fields, self) if v is not None]
+ )
+
+
+class ServiceSecret(ServiceConfigBase):
+ pass
+
+
+class ServiceConfig(ServiceConfigBase):
+ pass
+
+
+class ServicePort(namedtuple('_ServicePort', 'target published protocol mode external_ip')):
+ def __new__(cls, target, published, *args, **kwargs):
+ try:
+ if target:
+ target = int(target)
+ except ValueError:
+ raise ConfigurationError('Invalid target port: {}'.format(target))
+
+ try:
+ if published:
+ published = int(published)
+ except ValueError:
+ raise ConfigurationError('Invalid published port: {}'.format(published))
+
+ return super(ServicePort, cls).__new__(
+ cls, target, published, *args, **kwargs
+ )
+
+ @classmethod
+ def parse(cls, spec):
+ if isinstance(spec, cls):
+ # When extending a service with ports, the port definitions have already been parsed
+ return [spec]
+
+ if not isinstance(spec, dict):
+ result = []
+ try:
+ for k, v in build_port_bindings([spec]).items():
+ if '/' in k:
+ target, proto = k.split('/', 1)
+ else:
+ target, proto = (k, None)
+ for pub in v:
+ if pub is None:
+ result.append(
+ cls(target, None, proto, None, None)
+ )
+ elif isinstance(pub, tuple):
+ result.append(
+ cls(target, pub[1], proto, None, pub[0])
+ )
+ else:
+ result.append(
+ cls(target, pub, proto, None, None)
+ )
+ except ValueError as e:
+ raise ConfigurationError(str(e))
+
+ return result
+
+ return [cls(
+ spec.get('target'),
+ spec.get('published'),
+ spec.get('protocol'),
+ spec.get('mode'),
+ None
+ )]
+
+ @property
+ def merge_field(self):
+ return (self.target, self.published, self.external_ip, self.protocol)
+
+ def repr(self):
+ return dict(
+ [(k, v) for k, v in zip(self._fields, self) if v is not None]
+ )
+
+ def legacy_repr(self):
+ return normalize_port_dict(self.repr())
+
+
+def normalize_port_dict(port):
+ return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
+ published=port.get('published', ''),
+ is_pub=(':' if port.get('published') is not None or port.get('external_ip') else ''),
+ target=port.get('target'),
+ protocol=port.get('protocol', 'tcp'),
+ external_ip=port.get('external_ip', ''),
+ has_ext_ip=(':' if port.get('external_ip') else ''),
+ )
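+
+# Sketch of the two accepted spec forms:
+#   ServicePort.parse('8080:80/udp')[0].repr()
+#     -> {'target': 80, 'published': 8080, 'protocol': 'udp'}
+#   ServicePort.parse({'target': 80, 'published': 8080, 'mode': 'host'})
+#     -> [ServicePort(target=80, published=8080, protocol=None, mode='host',
+#        external_ip=None)]   (the long form used by v3.2+ files)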
diff --git a/compose/config/validation.py b/compose/config/validation.py
new file mode 100644
index 00000000..940775a2
--- /dev/null
+++ b/compose/config/validation.py
@@ -0,0 +1,467 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import logging
+import os
+import re
+import sys
+
+import six
+from docker.utils.ports import split_port
+from jsonschema import Draft4Validator
+from jsonschema import FormatChecker
+from jsonschema import RefResolver
+from jsonschema import ValidationError
+
+from ..const import COMPOSEFILE_V1 as V1
+from ..const import NANOCPUS_SCALE
+from .errors import ConfigurationError
+from .errors import VERSION_EXPLANATION
+from .sort_services import get_service_name_from_network_mode
+
+
+log = logging.getLogger(__name__)
+
+
+DOCKER_CONFIG_HINTS = {
+ 'cpu_share': 'cpu_shares',
+ 'add_host': 'extra_hosts',
+ 'hosts': 'extra_hosts',
+ 'extra_host': 'extra_hosts',
+ 'device': 'devices',
+ 'link': 'links',
+ 'memory_swap': 'memswap_limit',
+ 'port': 'ports',
+ 'privilege': 'privileged',
+ 'priviliged': 'privileged',
+ 'privilige': 'privileged',
+ 'volume': 'volumes',
+ 'workdir': 'working_dir',
+}
+
+
+VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
+VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
+
+
+@FormatChecker.cls_checks(format="ports", raises=ValidationError)
+def format_ports(instance):
+ try:
+ split_port(instance)
+ except ValueError as e:
+ raise ValidationError(six.text_type(e))
+ return True
+
+
+@FormatChecker.cls_checks(format="expose", raises=ValidationError)
+def format_expose(instance):
+ if isinstance(instance, six.string_types):
+ if not re.match(VALID_EXPOSE_FORMAT, instance):
+ raise ValidationError(
+ "should be of the format 'PORT[/PROTOCOL]'")
+
+ return True
+
+
+def match_named_volumes(service_dict, project_volumes):
+ service_volumes = service_dict.get('volumes', [])
+ for volume_spec in service_volumes:
+ if volume_spec.is_named_volume and volume_spec.external not in project_volumes:
+ raise ConfigurationError(
+ 'Named volume "{0}" is used in service "{1}" but no'
+ ' declaration was found in the volumes section.'.format(
+ volume_spec.repr(), service_dict.get('name')
+ )
+ )
+
+
+def python_type_to_yaml_type(type_):
+ type_name = type(type_).__name__
+ return {
+ 'dict': 'mapping',
+ 'list': 'array',
+ 'int': 'number',
+ 'float': 'number',
+ 'bool': 'boolean',
+ 'unicode': 'string',
+ 'str': 'string',
+ 'bytes': 'string',
+ }.get(type_name, type_name)
+
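+# e.g. python_type_to_yaml_type({'a': 1}) -> 'mapping',
+#      python_type_to_yaml_type('web')    -> 'string'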
+
+def validate_config_section(filename, config, section):
+ """Validate the structure of a configuration section. This must be done
+ before interpolation so it's separate from schema validation.
+ """
+ if not isinstance(config, dict):
+ raise ConfigurationError(
+ "In file '{filename}', {section} must be a mapping, not "
+ "{type}.".format(
+ filename=filename,
+ section=section,
+ type=anglicize_json_type(python_type_to_yaml_type(config))))
+
+ for key, value in config.items():
+ if not isinstance(key, six.string_types):
+ raise ConfigurationError(
+ "In file '{filename}', the {section} name {name} must be a "
+ "quoted string, i.e. '{name}'.".format(
+ filename=filename,
+ section=section,
+ name=key))
+
+ if not isinstance(value, (dict, type(None))):
+ raise ConfigurationError(
+ "In file '{filename}', {section} '{name}' must be a mapping not "
+ "{type}.".format(
+ filename=filename,
+ section=section,
+ name=key,
+ type=anglicize_json_type(python_type_to_yaml_type(value))))
+
+
+def validate_top_level_object(config_file):
+ if not isinstance(config_file.config, dict):
+ raise ConfigurationError(
+ "Top level object in '{}' needs to be an object not '{}'.".format(
+ config_file.filename,
+ type(config_file.config)))
+
+
+def validate_ulimits(service_config):
+ ulimit_config = service_config.config.get('ulimits', {})
+ for limit_name, soft_hard_values in six.iteritems(ulimit_config):
+ if isinstance(soft_hard_values, dict):
+ if not soft_hard_values['soft'] <= soft_hard_values['hard']:
+ raise ConfigurationError(
+ "Service '{s.name}' has invalid ulimit '{ulimit}'. "
+ "'soft' value can not be greater than 'hard' value ".format(
+ s=service_config,
+ ulimit=ulimit_config))
+
+
+def validate_extends_file_path(service_name, extends_options, filename):
+ """
+ The service being extended must be defined either in the file named by the
+ 'file' key, or within 'filename' itself.
+ """
+ error_prefix = "Invalid 'extends' configuration for %s:" % service_name
+
+ if 'file' not in extends_options and filename is None:
+ raise ConfigurationError(
+ "%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
+ )
+
+
+def validate_network_mode(service_config, service_names):
+ network_mode = service_config.config.get('network_mode')
+ if not network_mode:
+ return
+
+ if 'networks' in service_config.config:
+ raise ConfigurationError("'network_mode' and 'networks' cannot be combined")
+
+ dependency = get_service_name_from_network_mode(network_mode)
+ if not dependency:
+ return
+
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' uses the network stack of service '{dep}' which "
+ "is undefined.".format(s=service_config, dep=dependency))
+
+
+def validate_pid_mode(service_config, service_names):
+ pid_mode = service_config.config.get('pid')
+ if not pid_mode:
+ return
+
+ dependency = get_service_name_from_network_mode(pid_mode)
+ if not dependency:
+ return
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' uses the PID namespace of service '{dep}' which "
+ "is undefined.".format(s=service_config, dep=dependency)
+ )
+
+
+def validate_links(service_config, service_names):
+ for link in service_config.config.get('links', []):
+ if link.split(':')[0] not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' has a link to service '{link}' which is "
+ "undefined.".format(s=service_config, link=link))
+
+
+def validate_depends_on(service_config, service_names):
+ deps = service_config.config.get('depends_on', {})
+ for dependency in deps.keys():
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' depends on service '{dep}' which is "
+ "undefined.".format(s=service_config, dep=dependency)
+ )
+
+
+def get_unsupported_config_msg(path, error_key):
+ msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
+ if error_key in DOCKER_CONFIG_HINTS:
+ msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key])
+ return msg
+
+
+def anglicize_json_type(json_type):
+ if json_type.startswith(('a', 'e', 'i', 'o', 'u')):
+ return 'an ' + json_type
+ return 'a ' + json_type
+
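+# e.g. anglicize_json_type('array')  -> 'an array'
+#      anglicize_json_type('string') -> 'a string'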
+
+def is_service_dict_schema(schema_id):
+ return schema_id in ('config_schema_v1.json', '#/properties/services')
+
+
+def handle_error_for_schema_with_id(error, path):
+ schema_id = error.schema['id']
+
+ if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties':
+ return "Invalid service name '{}' - only {} characters are allowed".format(
+ # The service_name is one of the keys in the json object
+ [i for i in list(error.instance) if not i or any(filter(
+ lambda c: not re.match(VALID_NAME_CHARS, c), i
+ ))][0],
+ VALID_NAME_CHARS
+ )
+
+ if error.validator == 'additionalProperties':
+ if schema_id == '#/definitions/service':
+ invalid_config_key = parse_key_from_error_msg(error)
+ return get_unsupported_config_msg(path, invalid_config_key)
+
+ if schema_id.startswith('config_schema_v'):
+ invalid_config_key = parse_key_from_error_msg(error)
+ return ('Invalid top-level property "{key}". Valid top-level '
+ 'sections for this Compose file are: {properties}, and '
+ 'extensions starting with "x-".\n\n{explanation}').format(
+ key=invalid_config_key,
+ properties=', '.join(error.schema['properties'].keys()),
+ explanation=VERSION_EXPLANATION
+ )
+
+ if not error.path:
+ return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
+
+
+def handle_generic_error(error, path):
+ msg_format = None
+ error_msg = error.message
+
+ if error.validator == 'oneOf':
+ msg_format = "{path} {msg}"
+ config_key, error_msg = _parse_oneof_validator(error)
+ if config_key:
+ path.append(config_key)
+
+ elif error.validator == 'type':
+ msg_format = "{path} contains an invalid type, it should be {msg}"
+ error_msg = _parse_valid_types_from_validator(error.validator_value)
+
+ elif error.validator == 'required':
+ error_msg = ", ".join(error.validator_value)
+ msg_format = "{path} is invalid, {msg} is required."
+
+ elif error.validator == 'dependencies':
+ config_key = list(error.validator_value.keys())[0]
+ required_keys = ",".join(error.validator_value[config_key])
+
+ msg_format = "{path} is invalid: {msg}"
+ path.append(config_key)
+ error_msg = "when defining '{}' you must set '{}' as well".format(
+ config_key,
+ required_keys)
+
+ elif error.cause:
+ error_msg = six.text_type(error.cause)
+ msg_format = "{path} is invalid: {msg}"
+
+ elif error.path:
+ msg_format = "{path} value {msg}"
+
+ if msg_format:
+ return msg_format.format(path=path_string(path), msg=error_msg)
+
+ return error.message
+
+
+def parse_key_from_error_msg(error):
+ return error.message.split("'")[1]
+
+
+def path_string(path):
+ return ".".join(c for c in path if isinstance(c, six.string_types))
+
+
+def _parse_valid_types_from_validator(validator):
+ """A validator value can be either an array of valid types or a string of
+ a valid type. Parse the valid types and prefix with the correct article.
+ """
+ if not isinstance(validator, list):
+ return anglicize_json_type(validator)
+
+ if len(validator) == 1:
+ return anglicize_json_type(validator[0])
+
+ return "{}, or {}".format(
+ ", ".join([anglicize_json_type(validator[0])] + validator[1:-1]),
+ anglicize_json_type(validator[-1]))
+
+
+def _parse_oneof_validator(error):
+ """oneOf has multiple schemas, so we need to reason about which schema, sub
+ schema or constraint the validation is failing on.
+ Inspecting the context value of a ValidationError gives us information about
+ which sub schema failed and which kind of error it is.
+ """
+ types = []
+ for context in error.context:
+ if context.validator == 'oneOf':
+ _, error_msg = _parse_oneof_validator(context)
+ return path_string(context.path), error_msg
+
+ if context.validator == 'required':
+ return (None, context.message)
+
+ if context.validator == 'additionalProperties':
+ invalid_config_key = parse_key_from_error_msg(context)
+ return (None, "contains unsupported option: '{}'".format(invalid_config_key))
+
+ if context.validator == 'uniqueItems':
+ return (
+ path_string(context.path) if context.path else None,
+ "contains non-unique items, please remove duplicates from {}".format(
+ context.instance),
+ )
+
+ if context.path:
+ return (
+ path_string(context.path),
+ "contains {}, which is an invalid type, it should be {}".format(
+ json.dumps(context.instance),
+ _parse_valid_types_from_validator(context.validator_value)),
+ )
+
+ if context.validator == 'type':
+ types.append(context.validator_value)
+
+ valid_types = _parse_valid_types_from_validator(types)
+ return (None, "contains an invalid type, it should be {}".format(valid_types))
+
+
+def process_service_constraint_errors(error, service_name, version):
+ if version == V1:
+ if 'image' in error.instance and 'build' in error.instance:
+ return (
+ "Service {} has both an image and build path specified. "
+ "A service can either be built to image or use an existing "
+ "image, not both.".format(service_name))
+
+ if 'image' in error.instance and 'dockerfile' in error.instance:
+ return (
+ "Service {} has both an image and alternate Dockerfile. "
+ "A service can either be built to image or use an existing "
+ "image, not both.".format(service_name))
+
+ if 'image' not in error.instance and 'build' not in error.instance:
+ return (
+ "Service {} has neither an image nor a build context specified. "
+ "At least one must be provided.".format(service_name))
+
+
+def process_config_schema_errors(error):
+ path = list(error.path)
+
+ if 'id' in error.schema:
+ error_msg = handle_error_for_schema_with_id(error, path)
+ if error_msg:
+ return error_msg
+
+ return handle_generic_error(error, path)
+
+
+def validate_against_config_schema(config_file):
+ schema = load_jsonschema(config_file)
+ format_checker = FormatChecker(["ports", "expose"])
+ validator = Draft4Validator(
+ schema,
+ resolver=RefResolver(get_resolver_path(), schema),
+ format_checker=format_checker)
+ handle_errors(
+ validator.iter_errors(config_file.config),
+ process_config_schema_errors,
+ config_file.filename)
+
+
+def validate_service_constraints(config, service_name, config_file):
+ def handler(errors):
+ return process_service_constraint_errors(
+ errors, service_name, config_file.version)
+
+ schema = load_jsonschema(config_file)
+ validator = Draft4Validator(schema['definitions']['constraints']['service'])
+ handle_errors(validator.iter_errors(config), handler, None)
+
+
+def validate_cpu(service_config):
+ cpus = service_config.config.get('cpus')
+ if not cpus:
+ return
+ nano_cpus = cpus * NANOCPUS_SCALE
+ if isinstance(nano_cpus, float) and not nano_cpus.is_integer():
+ raise ConfigurationError(
+ "cpus must have nine or less digits after decimal point")
+
+
+def get_schema_path():
+ return os.path.dirname(os.path.abspath(__file__))
+
+
+def load_jsonschema(config_file):
+ filename = os.path.join(
+ get_schema_path(),
+ "config_schema_v{0}.json".format(config_file.version))
+
+ if not os.path.exists(filename):
+ raise ConfigurationError(
+ 'Version in "{}" is unsupported. {}'
+ .format(config_file.filename, VERSION_EXPLANATION))
+
+ with open(filename, "r") as fh:
+ return json.load(fh)
+
+
+def get_resolver_path():
+ schema_path = get_schema_path()
+ if sys.platform == "win32":
+ scheme = "///"
+ # TODO: why is this necessary?
+ schema_path = schema_path.replace('\\', '/')
+ else:
+ scheme = "//"
+ return "file:{}{}/".format(scheme, schema_path)
+
+
+def handle_errors(errors, format_error_func, filename):
+ """jsonschema returns an error tree full of information to explain what has
+ gone wrong. Process each error, pull out the relevant information, and
+ rewrite it as a helpful, human-readable error message.
+ """
+ errors = list(sorted(errors, key=str))
+ if not errors:
+ return
+
+ error_msg = '\n'.join(format_error_func(error) for error in errors)
+ raise ConfigurationError(
+ "The Compose file{file_msg} is invalid because:\n{error_msg}".format(
+ file_msg=" '{}'".format(filename) if filename else "",
+ error_msg=error_msg))
diff --git a/compose/const.py b/compose/const.py
new file mode 100644
index 00000000..2ac08b89
--- /dev/null
+++ b/compose/const.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import sys
+
+from .version import ComposeVersion
+
+DEFAULT_TIMEOUT = 10
+HTTP_TIMEOUT = 60
+IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
+IS_WINDOWS_PLATFORM = (sys.platform == "win32")
+LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
+LABEL_ONE_OFF = 'com.docker.compose.oneoff'
+LABEL_PROJECT = 'com.docker.compose.project'
+LABEL_SERVICE = 'com.docker.compose.service'
+LABEL_NETWORK = 'com.docker.compose.network'
+LABEL_VERSION = 'com.docker.compose.version'
+LABEL_VOLUME = 'com.docker.compose.volume'
+LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
+NANOCPUS_SCALE = 1000000000
+
+SECRETS_PATH = '/run/secrets'
+
+COMPOSEFILE_V1 = ComposeVersion('1')
+COMPOSEFILE_V2_0 = ComposeVersion('2.0')
+COMPOSEFILE_V2_1 = ComposeVersion('2.1')
+COMPOSEFILE_V2_2 = ComposeVersion('2.2')
+COMPOSEFILE_V2_3 = ComposeVersion('2.3')
+
+COMPOSEFILE_V3_0 = ComposeVersion('3.0')
+COMPOSEFILE_V3_1 = ComposeVersion('3.1')
+COMPOSEFILE_V3_2 = ComposeVersion('3.2')
+COMPOSEFILE_V3_3 = ComposeVersion('3.3')
+COMPOSEFILE_V3_4 = ComposeVersion('3.4')
+COMPOSEFILE_V3_5 = ComposeVersion('3.5')
+
+API_VERSIONS = {
+ COMPOSEFILE_V1: '1.21',
+ COMPOSEFILE_V2_0: '1.22',
+ COMPOSEFILE_V2_1: '1.24',
+ COMPOSEFILE_V2_2: '1.25',
+ COMPOSEFILE_V2_3: '1.30',
+ COMPOSEFILE_V3_0: '1.25',
+ COMPOSEFILE_V3_1: '1.25',
+ COMPOSEFILE_V3_2: '1.25',
+ COMPOSEFILE_V3_3: '1.30',
+ COMPOSEFILE_V3_4: '1.30',
+ COMPOSEFILE_V3_5: '1.30',
+}
+
+API_VERSION_TO_ENGINE_VERSION = {
+ API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
+ API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
+ API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
+ API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V2_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
+}
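+
+# Reading the two tables together: e.g. a version '2.1' Compose file requires
+# Docker API 1.24, which in turn requires Docker Engine 1.12.0 or newer.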
diff --git a/compose/container.py b/compose/container.py
new file mode 100644
index 00000000..4bc7f54f
--- /dev/null
+++ b/compose/container.py
@@ -0,0 +1,276 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from functools import reduce
+
+import six
+
+from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_PROJECT
+from .const import LABEL_SERVICE
+
+
+class Container(object):
+ """
+ Represents a Docker container, constructed from the output of
+ GET /containers/:id:/json.
+ """
+ def __init__(self, client, dictionary, has_been_inspected=False):
+ self.client = client
+ self.dictionary = dictionary
+ self.has_been_inspected = has_been_inspected
+ self.log_stream = None
+
+ @classmethod
+ def from_ps(cls, client, dictionary, **kwargs):
+ """
+ Construct a container object from the output of GET /containers/json.
+ """
+ name = get_container_name(dictionary)
+ if name is None:
+ return None
+
+ new_dictionary = {
+ 'Id': dictionary['Id'],
+ 'Image': dictionary['Image'],
+ 'Name': '/' + name,
+ }
+ return cls(client, new_dictionary, **kwargs)
+
+ @classmethod
+ def from_id(cls, client, id):
+ return cls(client, client.inspect_container(id), has_been_inspected=True)
+
+ @classmethod
+ def create(cls, client, **options):
+ response = client.create_container(**options)
+ return cls.from_id(client, response['Id'])
+
+ @property
+ def id(self):
+ return self.dictionary['Id']
+
+ @property
+ def image(self):
+ return self.dictionary['Image']
+
+ @property
+ def image_config(self):
+ return self.client.inspect_image(self.image)
+
+ @property
+ def short_id(self):
+ return self.id[:12]
+
+ @property
+ def name(self):
+ return self.dictionary['Name'][1:]
+
+ @property
+ def service(self):
+ return self.labels.get(LABEL_SERVICE)
+
+ @property
+ def name_without_project(self):
+ project = self.labels.get(LABEL_PROJECT)
+
+ if self.name.startswith('{0}_{1}'.format(project, self.service)):
+ return '{0}_{1}'.format(self.service, self.number)
+ else:
+ return self.name
+
+ @property
+ def number(self):
+ number = self.labels.get(LABEL_CONTAINER_NUMBER)
+ if not number:
+ raise ValueError("Container {0} does not have a {1} label".format(
+ self.short_id, LABEL_CONTAINER_NUMBER))
+ return int(number)
+
+ @property
+ def ports(self):
+ self.inspect_if_not_inspected()
+ return self.get('NetworkSettings.Ports') or {}
+
+ @property
+ def human_readable_ports(self):
+ def format_port(private, public):
+ if not public:
+ return [private]
+ return [
+ '{HostIp}:{HostPort}->{private}'.format(private=private, **pub)
+ for pub in public
+ ]
+
+ return ', '.join(
+ ','.join(format_port(*item))
+ for item in sorted(six.iteritems(self.ports))
+ )
+
+ @property
+ def labels(self):
+ return self.get('Config.Labels') or {}
+
+ @property
+ def stop_signal(self):
+ return self.get('Config.StopSignal')
+
+ @property
+ def log_config(self):
+ return self.get('HostConfig.LogConfig') or None
+
+ @property
+ def human_readable_state(self):
+ if self.is_paused:
+ return 'Paused'
+ if self.is_restarting:
+ return 'Restarting'
+ if self.is_running:
+ return 'Ghost' if self.get('State.Ghost') else 'Up'
+ else:
+ return 'Exit %s' % self.get('State.ExitCode')
+
+ @property
+ def human_readable_command(self):
+ entrypoint = self.get('Config.Entrypoint') or []
+ cmd = self.get('Config.Cmd') or []
+ return ' '.join(entrypoint + cmd)
+
+ @property
+ def environment(self):
+ def parse_env(var):
+ if '=' in var:
+ return var.split("=", 1)
+ return var, None
+ return dict(parse_env(var) for var in self.get('Config.Env') or [])
+
+ @property
+ def exit_code(self):
+ return self.get('State.ExitCode')
+
+ @property
+ def is_running(self):
+ return self.get('State.Running')
+
+ @property
+ def is_restarting(self):
+ return self.get('State.Restarting')
+
+ @property
+ def is_paused(self):
+ return self.get('State.Paused')
+
+ @property
+ def log_driver(self):
+ return self.get('HostConfig.LogConfig.Type')
+
+ @property
+ def has_api_logs(self):
+ log_type = self.log_driver
+ return not log_type or log_type in ('json-file', 'journald')
+
+ def attach_log_stream(self):
+ """A log stream can only be attached if the container uses a json-file
+ log driver.
+ """
+ if self.has_api_logs:
+ self.log_stream = self.attach(stdout=True, stderr=True, stream=True)
+
+ def get(self, key):
+ """Return a value from the container or None if the value is not set.
+
+ :param key: a string using dotted notation for nested dictionary
+ lookups
+ """
+ self.inspect_if_not_inspected()
+
+ def get_value(dictionary, key):
+ return (dictionary or {}).get(key)
+
+ return reduce(get_value, key.split('.'), self.dictionary)
+
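+ # Dotted-lookup sketch: self.get('State.Running') walks
+ # dictionary['State']['Running'] and yields None if any level is missing.
+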
+ def get_local_port(self, port, protocol='tcp'):
+ port = self.ports.get("%s/%s" % (port, protocol))
+ return "{HostIp}:{HostPort}".format(**port[0]) if port else None
+
+ def get_mount(self, mount_dest):
+ for mount in self.get('Mounts'):
+ if mount['Destination'] == mount_dest:
+ return mount
+ return None
+
+ def start(self, **options):
+ return self.client.start(self.id, **options)
+
+ def stop(self, **options):
+ return self.client.stop(self.id, **options)
+
+ def pause(self, **options):
+ return self.client.pause(self.id, **options)
+
+ def unpause(self, **options):
+ return self.client.unpause(self.id, **options)
+
+ def kill(self, **options):
+ return self.client.kill(self.id, **options)
+
+ def restart(self, **options):
+ return self.client.restart(self.id, **options)
+
+ def remove(self, **options):
+ return self.client.remove_container(self.id, **options)
+
+ def create_exec(self, command, **options):
+ return self.client.exec_create(self.id, command, **options)
+
+ def start_exec(self, exec_id, **options):
+ return self.client.exec_start(exec_id, **options)
+
+ def rename_to_tmp_name(self):
+ """Rename the container to a hopefully unique temporary container name
+ by prepending the short id.
+ """
+ self.client.rename(
+ self.id,
+ '%s_%s' % (self.short_id, self.name)
+ )
+
+ def inspect_if_not_inspected(self):
+ if not self.has_been_inspected:
+ self.inspect()
+
+ def wait(self):
+ return self.client.wait(self.id)
+
+ def logs(self, *args, **kwargs):
+ return self.client.logs(self.id, *args, **kwargs)
+
+ def inspect(self):
+ self.dictionary = self.client.inspect_container(self.id)
+ self.has_been_inspected = True
+ return self.dictionary
+
+ def attach(self, *args, **kwargs):
+ return self.client.attach(self.id, *args, **kwargs)
+
+ def __repr__(self):
+ return '<Container: %s (%s)>' % (self.name, self.id[:6])
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return False
+ return self.id == other.id
+
+ def __hash__(self):
+ return self.id.__hash__()
+
+
+def get_container_name(container):
+ if not container.get('Name') and not container.get('Names'):
+ return None
+ # inspect
+ if 'Name' in container:
+ return container['Name']
+ # ps
+ shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
+ return shortest_name.split('/')[-1]
diff --git a/compose/errors.py b/compose/errors.py
new file mode 100644
index 00000000..415b41e7
--- /dev/null
+++ b/compose/errors.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+
+class OperationFailedError(Exception):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class HealthCheckException(Exception):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class HealthCheckFailed(HealthCheckException):
+ def __init__(self, container_id):
+ super(HealthCheckFailed, self).__init__(
+ 'Container "{}" is unhealthy.'.format(container_id)
+ )
+
+
+class NoHealthCheckConfigured(HealthCheckException):
+ def __init__(self, service_name):
+ super(NoHealthCheckConfigured, self).__init__(
+ 'Service "{}" is missing a healthcheck configuration'.format(
+ service_name
+ )
+ )
diff --git a/compose/network.py b/compose/network.py
new file mode 100644
index 00000000..2e0a7e6e
--- /dev/null
+++ b/compose/network.py
@@ -0,0 +1,286 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+from docker.errors import NotFound
+from docker.types import IPAMConfig
+from docker.types import IPAMPool
+from docker.utils import version_gte
+from docker.utils import version_lt
+
+from .config import ConfigurationError
+from .const import LABEL_NETWORK
+from .const import LABEL_PROJECT
+
+
+log = logging.getLogger(__name__)
+
+OPTS_EXCEPTIONS = [
+ 'com.docker.network.driver.overlay.vxlanid_list',
+ 'com.docker.network.windowsshim.hnsid',
+ 'com.docker.network.windowsshim.networkname'
+]
+
+
+class Network(object):
+ def __init__(self, client, project, name, driver=None, driver_opts=None,
+ ipam=None, external_name=None, internal=False, enable_ipv6=False,
+ labels=None):
+ self.client = client
+ self.project = project
+ self.name = name
+ self.driver = driver
+ self.driver_opts = driver_opts
+ self.ipam = create_ipam_config_from_dict(ipam)
+ self.external_name = external_name
+ self.internal = internal
+ self.enable_ipv6 = enable_ipv6
+ self.labels = labels
+
+ def ensure(self):
+ if self.external_name:
+ try:
+ self.inspect()
+ log.debug(
+ 'Network {0} declared as external. No new '
+ 'network will be created.'.format(self.name)
+ )
+ except NotFound:
+ raise ConfigurationError(
+ 'Network {name} declared as external, but could'
+ ' not be found. Please create the network manually'
+ ' using `{command} {name}` and try again.'.format(
+ name=self.external_name,
+ command='docker network create'
+ )
+ )
+ return
+
+ try:
+ data = self.inspect()
+ check_remote_network_config(data, self)
+ except NotFound:
+ driver_name = 'the default driver'
+ if self.driver:
+ driver_name = 'driver "{}"'.format(self.driver)
+
+ log.info(
+ 'Creating network "{}" with {}'
+ .format(self.full_name, driver_name)
+ )
+
+ self.client.create_network(
+ name=self.full_name,
+ driver=self.driver,
+ options=self.driver_opts,
+ ipam=self.ipam,
+ internal=self.internal,
+ enable_ipv6=self.enable_ipv6,
+ labels=self._labels,
+ attachable=version_gte(self.client._version, '1.24') or None,
+ check_duplicate=True,
+ )
+
+ def remove(self):
+ if self.external_name:
+ log.info("Network %s is external, skipping", self.full_name)
+ return
+
+ log.info("Removing network {}".format(self.full_name))
+ self.client.remove_network(self.full_name)
+
+ def inspect(self):
+ return self.client.inspect_network(self.full_name)
+
+ @property
+ def full_name(self):
+ if self.external_name:
+ return self.external_name
+ return '{0}_{1}'.format(self.project, self.name)
+
+ @property
+ def _labels(self):
+ if version_lt(self.client._version, '1.23'):
+ return None
+ labels = self.labels.copy() if self.labels else {}
+ labels.update({
+ LABEL_PROJECT: self.project,
+ LABEL_NETWORK: self.name,
+ })
+ return labels
+
+
+def create_ipam_config_from_dict(ipam_dict):
+ if not ipam_dict:
+ return None
+
+ return IPAMConfig(
+ driver=ipam_dict.get('driver'),
+ pool_configs=[
+ IPAMPool(
+ subnet=config.get('subnet'),
+ iprange=config.get('ip_range'),
+ gateway=config.get('gateway'),
+ aux_addresses=config.get('aux_addresses'),
+ )
+ for config in ipam_dict.get('config', [])
+ ],
+ options=ipam_dict.get('options')
+ )
+
+
+class NetworkConfigChangedError(ConfigurationError):
+ def __init__(self, net_name, property_name):
+ super(NetworkConfigChangedError, self).__init__(
+ 'Network "{}" needs to be recreated - {} has changed'.format(
+ net_name, property_name
+ )
+ )
+
+
+def check_remote_ipam_config(remote, local):
+ remote_ipam = remote.get('IPAM')
+ ipam_dict = create_ipam_config_from_dict(local.ipam)
+ if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM driver')
+ if len(ipam_dict['Config']) != 0:
+ if len(ipam_dict['Config']) != len(remote_ipam['Config']):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM configs')
+ # sorted() requires a callable key; a bare string raises TypeError
+ remote_configs = sorted(remote_ipam['Config'], key=lambda c: c.get('Subnet'))
+ local_configs = sorted(ipam_dict['Config'], key=lambda c: c.get('Subnet'))
+ while local_configs:
+ lc = local_configs.pop()
+ rc = remote_configs.pop()
+ if lc.get('Subnet') != rc.get('Subnet'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config subnet')
+ if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config gateway')
+ if lc.get('IPRange') != rc.get('IPRange'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config ip_range')
+ if sorted(lc.get('AuxiliaryAddresses')) != sorted(rc.get('AuxiliaryAddresses')):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')
+
+ remote_opts = remote_ipam.get('Options') or {}
+ local_opts = local.ipam.get('options') or {}
+ for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+ if remote_opts.get(k) != local_opts.get(k):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
+
+
+def check_remote_network_config(remote, local):
+ if local.driver and remote.get('Driver') != local.driver:
+ raise NetworkConfigChangedError(local.full_name, 'driver')
+ local_opts = local.driver_opts or {}
+ remote_opts = remote.get('Options') or {}
+ for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+ if k in OPTS_EXCEPTIONS:
+ continue
+ if remote_opts.get(k) != local_opts.get(k):
+ raise NetworkConfigChangedError(local.full_name, 'option "{}"'.format(k))
+
+ if local.ipam is not None:
+ check_remote_ipam_config(remote, local)
+
+ if local.internal is not None and local.internal != remote.get('Internal', False):
+ raise NetworkConfigChangedError(local.full_name, 'internal')
+ if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
+ raise NetworkConfigChangedError(local.full_name, 'enable_ipv6')
+
+ local_labels = local.labels or {}
+ remote_labels = remote.get('Labels', {})
+ for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
+ if k.startswith('com.docker.'): # We are only interested in user-specified labels
+ continue
+ if remote_labels.get(k) != local_labels.get(k):
+ log.warn(
+ 'Network {}: label "{}" has changed. It may need to be'
+ ' recreated.'.format(local.full_name, k)
+ )
+
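+# e.g. changing a network's `driver` in the Compose file while the network
+# already exists raises NetworkConfigChangedError, telling the user the
+# network needs to be recreated; label-only drift merely logs a warning.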
+
+def build_networks(name, config_data, client):
+ network_config = config_data.networks or {}
+ networks = {
+ network_name: Network(
+ client=client, project=name, name=network_name,
+ driver=data.get('driver'),
+ driver_opts=data.get('driver_opts'),
+ ipam=data.get('ipam'),
+ external_name=data.get('external_name'),
+ internal=data.get('internal'),
+ enable_ipv6=data.get('enable_ipv6'),
+ labels=data.get('labels'),
+ )
+ for network_name, data in network_config.items()
+ }
+
+ if 'default' not in networks:
+ networks['default'] = Network(client, name, 'default')
+
+ return networks
+
+
+class ProjectNetworks(object):
+
+ def __init__(self, networks, use_networking):
+ self.networks = networks or {}
+ self.use_networking = use_networking
+
+ @classmethod
+ def from_services(cls, services, networks, use_networking):
+ service_networks = {
+ network: networks.get(network)
+ for service in services
+ for network in get_network_names_for_service(service)
+ }
+ unused = set(networks) - set(service_networks) - {'default'}
+ if unused:
+ log.warn(
+ "Some networks were defined but are not used by any service: "
+ "{}".format(", ".join(unused)))
+ return cls(service_networks, use_networking)
+
+ def remove(self):
+ if not self.use_networking:
+ return
+ for network in self.networks.values():
+ try:
+ network.remove()
+ except NotFound:
+ log.warn("Network %s not found.", network.full_name)
+
+ def initialize(self):
+ if not self.use_networking:
+ return
+
+ for network in self.networks.values():
+ network.ensure()
+
+
+def get_network_defs_for_service(service_dict):
+ if 'network_mode' in service_dict:
+ return {}
+ networks = service_dict.get('networks', {'default': None})
+ return dict(
+ (net, (config or {}))
+ for net, config in networks.items()
+ )
+
+
+def get_network_names_for_service(service_dict):
+ return get_network_defs_for_service(service_dict).keys()
+
+
+def get_networks(service_dict, network_definitions):
+ networks = {}
+ for name, netdef in get_network_defs_for_service(service_dict).items():
+ network = network_definitions.get(name)
+ if network:
+ networks[network.full_name] = netdef
+ else:
+ raise ConfigurationError(
+ 'Service "{}" uses an undefined network "{}"'
+ .format(service_dict['name'], name))
+
+ return networks
diff --git a/compose/parallel.py b/compose/parallel.py
new file mode 100644
index 00000000..d455711d
--- /dev/null
+++ b/compose/parallel.py
@@ -0,0 +1,298 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import operator
+import sys
+from threading import Semaphore
+from threading import Thread
+
+from docker.errors import APIError
+from six.moves import _thread as thread
+from six.moves.queue import Empty
+from six.moves.queue import Queue
+
+from compose.cli.colors import green
+from compose.cli.colors import red
+from compose.cli.signals import ShutdownException
+from compose.errors import HealthCheckFailed
+from compose.errors import NoHealthCheckConfigured
+from compose.errors import OperationFailedError
+from compose.utils import get_output_stream
+
+
+log = logging.getLogger(__name__)
+
+STOP = object()
+
+
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+ """Runs func on objects in parallel while ensuring that func is
+ ran on object only after it is ran on all its dependencies.
+
+ get_deps called on object must return a collection with its dependencies.
+ get_name called on object must return its name.
+ """
+ objects = list(objects)
+ stream = get_output_stream(sys.stderr)
+
+ writer = ParallelStreamWriter(stream, msg)
+ for obj in objects:
+ writer.add_object(get_name(obj))
+ writer.write_initial()
+
+ events = parallel_execute_iter(objects, func, get_deps, limit)
+
+ errors = {}
+ results = []
+ error_to_reraise = None
+
+ for obj, result, exception in events:
+ if exception is None:
+ writer.write(get_name(obj), 'done', green)
+ results.append(result)
+ elif isinstance(exception, APIError):
+ errors[get_name(obj)] = exception.explanation
+ writer.write(get_name(obj), 'error', red)
+ elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
+ errors[get_name(obj)] = exception.msg
+ writer.write(get_name(obj), 'error', red)
+ elif isinstance(exception, UpstreamError):
+ writer.write(get_name(obj), 'error', red)
+ else:
+ errors[get_name(obj)] = exception
+ error_to_reraise = exception
+
+ for obj_name, error in errors.items():
+ stream.write("\nERROR: for {} {}\n".format(obj_name, error))
+
+ if error_to_reraise:
+ raise error_to_reraise
+
+ return results, errors
+
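+# Usage sketch (mirrors parallel_operation below):
+#   results, errors = parallel_execute(
+#       containers,
+#       operator.methodcaller('stop', timeout=10),
+#       operator.attrgetter('name'),
+#       'Stopping',
+#   )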
+
+def _no_deps(x):
+ return []
+
+
+class State(object):
+ """
+ Holds the state of a partially-complete parallel operation.
+
+ state.started: objects being processed
+ state.finished: objects which have been processed
+ state.failed: objects which either failed or whose dependencies failed
+ """
+ def __init__(self, objects):
+ self.objects = objects
+
+ self.started = set()
+ self.finished = set()
+ self.failed = set()
+
+ def is_done(self):
+ return len(self.finished) + len(self.failed) >= len(self.objects)
+
+ def pending(self):
+ return set(self.objects) - self.started - self.finished - self.failed
+
+
+class NoLimit(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *ex):
+ pass
+
+
+def parallel_execute_iter(objects, func, get_deps, limit):
+ """
+ Runs func on objects in parallel while ensuring that func is
+ run on an object only after it has been run on all of its dependencies.
+
+ Returns an iterator of tuples which look like:
+
+ # if func returned normally when run on object
+ (object, result, None)
+
+ # if func raised an exception when run on object
+ (object, None, exception)
+
+ # if func raised an exception when run on one of object's dependencies
+ (object, None, UpstreamError())
+ """
+ if get_deps is None:
+ get_deps = _no_deps
+
+ if limit is None:
+ limiter = NoLimit()
+ else:
+ limiter = Semaphore(limit)
+
+ results = Queue()
+ state = State(objects)
+
+ while True:
+ feed_queue(objects, func, get_deps, results, state, limiter)
+
+ try:
+ event = results.get(timeout=0.1)
+ except Empty:
+ continue
+ # See https://github.com/docker/compose/issues/189
+ except thread.error:
+ raise ShutdownException()
+
+ if event is STOP:
+ break
+
+ obj, _, exception = event
+ if exception is None:
+ log.debug('Finished processing: {}'.format(obj))
+ state.finished.add(obj)
+ else:
+ log.debug('Failed: {}'.format(obj))
+ state.failed.add(obj)
+
+ yield event
+
+
+def producer(obj, func, results, limiter):
+ """
+ The entry point for a producer thread which runs func on a single object.
+ Places a tuple on the results queue once func has either returned or raised.
+ """
+ with limiter:
+ try:
+ result = func(obj)
+ results.put((obj, result, None))
+ except Exception as e:
+ results.put((obj, None, e))
+
+
+def feed_queue(objects, func, get_deps, results, state, limiter):
+ """
+ Starts producer threads for any objects that are ready to be processed
+ (i.e. all of their dependencies have already been processed successfully).
+
+ Shortcuts any objects whose dependencies have failed and places an
+ (object, None, UpstreamError()) tuple on the results queue.
+ """
+ pending = state.pending()
+ log.debug('Pending: {}'.format(pending))
+
+ for obj in pending:
+ deps = get_deps(obj)
+ try:
+ if any(dep[0] in state.failed for dep in deps):
+ log.debug('{} has upstream errors - not processing'.format(obj))
+ results.put((obj, None, UpstreamError()))
+ state.failed.add(obj)
+ elif all(
+ dep not in objects or (
+ dep in state.finished and (not ready_check or ready_check(dep))
+ ) for dep, ready_check in deps
+ ):
+ log.debug('Starting producer thread for {}'.format(obj))
+ t = Thread(target=producer, args=(obj, func, results, limiter))
+ t.daemon = True
+ t.start()
+ state.started.add(obj)
+ except (HealthCheckFailed, NoHealthCheckConfigured) as e:
+ log.debug(
+ 'Healthcheck for service(s) upstream of {} failed - '
+ 'not processing'.format(obj)
+ )
+ results.put((obj, None, e))
+
+ if state.is_done():
+ results.put(STOP)
+
+
+class UpstreamError(Exception):
+ pass
+
+
+class ParallelStreamWriter(object):
+ """Write out messages for operations happening in parallel.
+
+ Each operation has its own line, and ANSI escape codes are used
+ to jump to the correct line and write over it.
+ """
+
+ noansi = False
+
+ @classmethod
+ def set_noansi(cls, value=True):
+ cls.noansi = value
+
+ def __init__(self, stream, msg):
+ self.stream = stream
+ self.msg = msg
+ self.lines = []
+ self.width = 0
+
+ def add_object(self, obj_index):
+ self.lines.append(obj_index)
+ self.width = max(self.width, len(obj_index))
+
+ def write_initial(self):
+ if self.msg is None:
+ return
+ for line in self.lines:
+ self.stream.write("{} {:<{width}} ... \r\n".format(self.msg, line,
+ width=self.width))
+ self.stream.flush()
+
+ def _write_ansi(self, obj_index, status):
+ position = self.lines.index(obj_index)
+ diff = len(self.lines) - position
+ # move up
+ self.stream.write("%c[%dA" % (27, diff))
+ # erase
+ self.stream.write("%c[2K\r" % 27)
+ self.stream.write("{} {:<{width}} ... {}\r".format(self.msg, obj_index,
+ status, width=self.width))
+ # move back down
+ self.stream.write("%c[%dB" % (27, diff))
+ self.stream.flush()
+
+ def _write_noansi(self, obj_index, status):
+ self.stream.write("{} {:<{width}} ... {}\r\n".format(self.msg, obj_index,
+ status, width=self.width))
+ self.stream.flush()
+
+ def write(self, obj_index, status, color_func):
+ if self.msg is None:
+ return
+ if self.noansi:
+ self._write_noansi(obj_index, status)
+ else:
+ self._write_ansi(obj_index, color_func(status))
+
+
+def parallel_operation(containers, operation, options, message):
+ parallel_execute(
+ containers,
+ operator.methodcaller(operation, **options),
+ operator.attrgetter('name'),
+ message,
+ )
+
+
+def parallel_remove(containers, options):
+ stopped_containers = [c for c in containers if not c.is_running]
+ parallel_operation(stopped_containers, 'remove', options, 'Removing')
+
+
+def parallel_pause(containers, options):
+ parallel_operation(containers, 'pause', options, 'Pausing')
+
+
+def parallel_unpause(containers, options):
+ parallel_operation(containers, 'unpause', options, 'Unpausing')
+
+
+def parallel_kill(containers, options):
+ parallel_operation(containers, 'kill', options, 'Killing')
diff --git a/compose/progress_stream.py b/compose/progress_stream.py
new file mode 100644
index 00000000..5314f89f
--- /dev/null
+++ b/compose/progress_stream.py
@@ -0,0 +1,111 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import utils
+
+
+class StreamOutputError(Exception):
+ pass
+
+
+def stream_output(output, stream):
+ is_terminal = hasattr(stream, 'isatty') and stream.isatty()
+ stream = utils.get_output_stream(stream)
+ all_events = []
+ lines = {}
+ diff = 0
+
+ for event in utils.json_stream(output):
+ all_events.append(event)
+ is_progress_event = 'progress' in event or 'progressDetail' in event
+
+ if not is_progress_event:
+ print_output_event(event, stream, is_terminal)
+ stream.flush()
+ continue
+
+ if not is_terminal:
+ continue
+
+ # if it's a progress event and we have a terminal, then display the progress bars
+ image_id = event.get('id')
+ if not image_id:
+ continue
+
+ if image_id not in lines:
+ lines[image_id] = len(lines)
+ stream.write("\n")
+
+ diff = len(lines) - lines[image_id]
+
+ # move cursor up `diff` rows
+ stream.write("%c[%dA" % (27, diff))
+
+ print_output_event(event, stream, is_terminal)
+
+ if 'id' in event:
+ # move cursor back down
+ stream.write("%c[%dB" % (27, diff))
+
+ stream.flush()
+
+ return all_events
+
+
+def print_output_event(event, stream, is_terminal):
+ if 'errorDetail' in event:
+ raise StreamOutputError(event['errorDetail']['message'])
+
+ terminator = ''
+
+ if is_terminal and 'stream' not in event:
+ # erase current line
+ stream.write("%c[2K\r" % 27)
+ terminator = "\r"
+ elif 'progressDetail' in event:
+ return
+
+ if 'time' in event:
+ stream.write("[%s] " % event['time'])
+
+ if 'id' in event:
+ stream.write("%s: " % event['id'])
+
+ if 'from' in event:
+ stream.write("(from %s) " % event['from'])
+
+ status = event.get('status', '')
+
+ if 'progress' in event:
+ stream.write("%s %s%s" % (status, event['progress'], terminator))
+ elif 'progressDetail' in event:
+ detail = event['progressDetail']
+ total = detail.get('total')
+ if 'current' in detail and total:
+ percentage = float(detail['current']) / float(total) * 100
+ stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
+ else:
+ stream.write('%s%s' % (status, terminator))
+ elif 'stream' in event:
+ stream.write("%s%s" % (event['stream'], terminator))
+ else:
+ stream.write("%s%s\n" % (status, terminator))
+
+
+def get_digest_from_pull(events):
+ for event in events:
+ status = event.get('status')
+ if not status or 'Digest' not in status:
+ continue
+
+ _, digest = status.split(':', 1)
+ return digest.strip()
+ return None
+
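+# e.g. a pull event {'status': 'Digest: sha256:8ab...'} yields 'sha256:8ab...'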
+
+def get_digest_from_push(events):
+ for event in events:
+ digest = event.get('aux', {}).get('Digest')
+ if digest:
+ return digest
+ return None
diff --git a/compose/project.py b/compose/project.py
new file mode 100644
index 00000000..c8b57edd
--- /dev/null
+++ b/compose/project.py
@@ -0,0 +1,674 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import datetime
+import logging
+import operator
+from functools import reduce
+
+import enum
+from docker.errors import APIError
+
+from . import parallel
+from .config import ConfigurationError
+from .config.config import V1
+from .config.sort_services import get_container_name_from_network_mode
+from .config.sort_services import get_service_name_from_network_mode
+from .const import IMAGE_EVENTS
+from .const import LABEL_ONE_OFF
+from .const import LABEL_PROJECT
+from .const import LABEL_SERVICE
+from .container import Container
+from .network import build_networks
+from .network import get_networks
+from .network import ProjectNetworks
+from .service import BuildAction
+from .service import ContainerNetworkMode
+from .service import ContainerPidMode
+from .service import ConvergenceStrategy
+from .service import NetworkMode
+from .service import PidMode
+from .service import Service
+from .service import ServiceNetworkMode
+from .service import ServicePidMode
+from .utils import microseconds_from_time_nano
+from .volume import ProjectVolumes
+
+
+log = logging.getLogger(__name__)
+
+
+@enum.unique
+class OneOffFilter(enum.Enum):
+ include = 0
+ exclude = 1
+ only = 2
+
+ @classmethod
+ def update_labels(cls, value, labels):
+ if value == cls.only:
+ labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
+ elif value == cls.exclude:
+ labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
+ elif value == cls.include:
+ pass
+ else:
+ raise ValueError("Invalid value for one_off: {}".format(repr(value)))
+
+
+class Project(object):
+ """
+ A collection of services.
+ """
+ def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
+ self.name = name
+ self.services = services
+ self.client = client
+ self.volumes = volumes or ProjectVolumes({})
+ self.networks = networks or ProjectNetworks({}, False)
+ self.config_version = config_version
+
+ def labels(self, one_off=OneOffFilter.exclude):
+ labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
+
+ OneOffFilter.update_labels(one_off, labels)
+ return labels
+
+ @classmethod
+ def from_config(cls, name, config_data, client):
+ """
+ Construct a Project from a config.Config object.
+ """
+ use_networking = (config_data.version and config_data.version != V1)
+ networks = build_networks(name, config_data, client)
+ project_networks = ProjectNetworks.from_services(
+ config_data.services,
+ networks,
+ use_networking)
+ volumes = ProjectVolumes.from_config(name, config_data, client)
+ project = cls(name, [], client, project_networks, volumes, config_data.version)
+
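+        # Build Service objects one at a time; the lookups below (links,
+        # network mode, pid mode, volumes_from) can only resolve services
+        # appended earlier, which relies on the config loader emitting
+        # services in dependency order.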
+ for service_dict in config_data.services:
+ service_dict = dict(service_dict)
+ if use_networking:
+ service_networks = get_networks(service_dict, networks)
+ else:
+ service_networks = {}
+
+ service_dict.pop('networks', None)
+ links = project.get_links(service_dict)
+ network_mode = project.get_network_mode(
+ service_dict, list(service_networks.keys())
+ )
+ pid_mode = project.get_pid_mode(service_dict)
+ volumes_from = get_volumes_from(project, service_dict)
+
+ if config_data.version != V1:
+ service_dict['volumes'] = [
+ volumes.namespace_spec(volume_spec)
+ for volume_spec in service_dict.get('volumes', [])
+ ]
+
+ secrets = get_secrets(
+ service_dict['name'],
+ service_dict.pop('secrets', None) or [],
+ config_data.secrets)
+
+ project.services.append(
+ Service(
+ service_dict.pop('name'),
+ client=client,
+ project=name,
+ use_networking=use_networking,
+ networks=service_networks,
+ links=links,
+ network_mode=network_mode,
+ volumes_from=volumes_from,
+ secrets=secrets,
+ pid_mode=pid_mode,
+ **service_dict)
+ )
+
+ return project
+
+ @property
+ def service_names(self):
+ return [service.name for service in self.services]
+
+ def get_service(self, name):
+ """
+ Retrieve a service by name. Raises NoSuchService
+ if the named service does not exist.
+ """
+ for service in self.services:
+ if service.name == name:
+ return service
+
+ raise NoSuchService(name)
+
+ def validate_service_names(self, service_names):
+ """
+ Validate that the given list of service names only contains valid
+ services. Raises NoSuchService if one of the names is invalid.
+ """
+ valid_names = self.service_names
+ for name in service_names:
+ if name not in valid_names:
+ raise NoSuchService(name)
+
+ def get_services(self, service_names=None, include_deps=False):
+ """
+ Returns a list of this project's services filtered
+ by the provided list of names, or all services if service_names is None
+ or [].
+
+ If include_deps is specified, returns a list including the dependencies for
+ service_names, in order of dependency.
+
+ Preserves the original order of self.services where possible,
+ reordering as needed to resolve dependencies.
+
+ Raises NoSuchService if any of the named services do not exist.
+ """
+ if service_names is None or len(service_names) == 0:
+ service_names = self.service_names
+
+ unsorted = [self.get_service(name) for name in service_names]
+ services = [s for s in self.services if s in unsorted]
+
+ if include_deps:
+ services = reduce(self._inject_deps, services, [])
+
+        uniques = []
+        for s in services:
+            if s not in uniques:
+                uniques.append(s)
+
+ return uniques
+
+ def get_services_without_duplicate(self, service_names=None, include_deps=False):
+ services = self.get_services(service_names, include_deps)
+ for service in services:
+ service.remove_duplicate_containers()
+ return services
+
+ def get_links(self, service_dict):
+ links = []
+ if 'links' in service_dict:
+ for link in service_dict.get('links', []):
+ if ':' in link:
+ service_name, link_name = link.split(':', 1)
+ else:
+ service_name, link_name = link, None
+ try:
+ links.append((self.get_service(service_name), link_name))
+ except NoSuchService:
+ raise ConfigurationError(
+ 'Service "%s" has a link to service "%s" which does not '
+ 'exist.' % (service_dict['name'], service_name))
+ del service_dict['links']
+ return links
+
+ def get_network_mode(self, service_dict, networks):
+ network_mode = service_dict.pop('network_mode', None)
+ if not network_mode:
+ if self.networks.use_networking:
+ return NetworkMode(networks[0]) if networks else NetworkMode('none')
+ return NetworkMode(None)
+
+ service_name = get_service_name_from_network_mode(network_mode)
+ if service_name:
+ return ServiceNetworkMode(self.get_service(service_name))
+
+ container_name = get_container_name_from_network_mode(network_mode)
+ if container_name:
+ try:
+ return ContainerNetworkMode(Container.from_id(self.client, container_name))
+ except APIError:
+ raise ConfigurationError(
+ "Service '{name}' uses the network stack of container '{dep}' which "
+ "does not exist.".format(name=service_dict['name'], dep=container_name))
+
+ return NetworkMode(network_mode)
+
+ def get_pid_mode(self, service_dict):
+ pid_mode = service_dict.pop('pid', None)
+ if not pid_mode:
+ return PidMode(None)
+
+ service_name = get_service_name_from_network_mode(pid_mode)
+ if service_name:
+ return ServicePidMode(self.get_service(service_name))
+
+ container_name = get_container_name_from_network_mode(pid_mode)
+ if container_name:
+ try:
+ return ContainerPidMode(Container.from_id(self.client, container_name))
+ except APIError:
+ raise ConfigurationError(
+ "Service '{name}' uses the PID namespace of container '{dep}' which "
+ "does not exist.".format(name=service_dict['name'], dep=container_name)
+ )
+
+ return PidMode(pid_mode)
+
+ def start(self, service_names=None, **options):
+ containers = []
+
+ def start_service(service):
+ service_containers = service.start(quiet=True, **options)
+ containers.extend(service_containers)
+
+ services = self.get_services(service_names)
+
+ def get_deps(service):
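+            # parallel_execute will only start a service once every
+            # dependency returned here has been started.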
+ return {
+ (self.get_service(dep), config)
+ for dep, config in service.get_dependency_configs().items()
+ }
+
+ parallel.parallel_execute(
+ services,
+ start_service,
+ operator.attrgetter('name'),
+ 'Starting',
+ get_deps,
+ )
+
+ return containers
+
+ def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options):
+ containers = self.containers(service_names, one_off=one_off)
+
+ def get_deps(container):
+            # Return the inverse dependencies: containers whose services
+            # depend on this one, so dependents are stopped first.
+ return {(other, None) for other in containers
+ if container.service in
+ self.get_service(other.service).get_dependency_names()}
+
+ parallel.parallel_execute(
+ containers,
+ self.build_container_operation_with_timeout_func('stop', options),
+ operator.attrgetter('name'),
+ 'Stopping',
+ get_deps,
+ )
+
+ def pause(self, service_names=None, **options):
+ containers = self.containers(service_names)
+ parallel.parallel_pause(reversed(containers), options)
+ return containers
+
+ def unpause(self, service_names=None, **options):
+ containers = self.containers(service_names)
+ parallel.parallel_unpause(containers, options)
+ return containers
+
+ def kill(self, service_names=None, **options):
+ parallel.parallel_kill(self.containers(service_names), options)
+
+ def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options):
+ parallel.parallel_remove(self.containers(
+ service_names, stopped=True, one_off=one_off
+ ), options)
+
+ def down(self, remove_image_type, include_volumes, remove_orphans=False):
+ self.stop(one_off=OneOffFilter.include)
+ self.find_orphan_containers(remove_orphans)
+ self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
+
+ self.networks.remove()
+
+ if include_volumes:
+ self.volumes.remove()
+
+ self.remove_images(remove_image_type)
+
+ def remove_images(self, remove_image_type):
+ for service in self.get_services():
+ service.remove_image(remove_image_type)
+
+ def restart(self, service_names=None, **options):
+ containers = self.containers(service_names, stopped=True)
+
+ parallel.parallel_execute(
+ containers,
+ self.build_container_operation_with_timeout_func('restart', options),
+ operator.attrgetter('name'),
+ 'Restarting',
+ )
+ return containers
+
+ def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
+ for service in self.get_services(service_names):
+ if service.can_be_built():
+ service.build(no_cache, pull, force_rm, build_args)
+ else:
+ log.info('%s uses an image, skipping' % service.name)
+
+ def create(
+ self,
+ service_names=None,
+ strategy=ConvergenceStrategy.changed,
+ do_build=BuildAction.none,
+ ):
+ services = self.get_services_without_duplicate(service_names, include_deps=True)
+
+ for svc in services:
+ svc.ensure_image_exists(do_build=do_build)
+ plans = self._get_convergence_plans(services, strategy)
+
+ for service in services:
+ service.execute_convergence_plan(
+ plans[service.name],
+ detached=True,
+ start=False)
+
+ def events(self, service_names=None):
+ def build_container_event(event, container):
+ time = datetime.datetime.fromtimestamp(event['time'])
+ time = time.replace(
+ microsecond=microseconds_from_time_nano(event['timeNano']))
+ return {
+ 'time': time,
+ 'type': 'container',
+ 'action': event['status'],
+ 'id': container.id,
+ 'service': container.service,
+ 'attributes': {
+ 'name': container.name,
+ 'image': event['from'],
+ },
+ 'container': container,
+ }
+
+ service_names = set(service_names or self.service_names)
+ for event in self.client.events(
+ filters={'label': self.labels()},
+ decode=True
+ ):
+ # The first part of this condition is a guard against some events
+            # broadcast by Swarm that don't have a status field.
+ # See https://github.com/docker/compose/issues/3316
+ if 'status' not in event or event['status'] in IMAGE_EVENTS:
+ # We don't receive any image events because labels aren't applied
+ # to images
+ continue
+
+            # TODO: get labels from the API (v1.22+); see GitHub issue 2618
+ try:
+ # this can fail if the container has been removed
+ container = Container.from_id(self.client, event['id'])
+ except APIError:
+ continue
+ if container.service not in service_names:
+ continue
+ yield build_container_event(event, container)
+
+ def up(self,
+ service_names=None,
+ start_deps=True,
+ strategy=ConvergenceStrategy.changed,
+ do_build=BuildAction.none,
+ timeout=None,
+ detached=False,
+ remove_orphans=False,
+ scale_override=None,
+ rescale=True,
+ start=True):
+
+ warn_for_swarm_mode(self.client)
+
+ self.initialize()
+ self.find_orphan_containers(remove_orphans)
+
+ if scale_override is None:
+ scale_override = {}
+
+ services = self.get_services_without_duplicate(
+ service_names,
+ include_deps=start_deps)
+
+ for svc in services:
+ svc.ensure_image_exists(do_build=do_build)
+ plans = self._get_convergence_plans(services, strategy)
+
+ def do(service):
+ return service.execute_convergence_plan(
+ plans[service.name],
+ timeout=timeout,
+ detached=detached,
+ scale_override=scale_override.get(service.name),
+ rescale=rescale,
+ start=start
+ )
+
+ def get_deps(service):
+ return {
+ (self.get_service(dep), config)
+ for dep, config in service.get_dependency_configs().items()
+ }
+
+ results, errors = parallel.parallel_execute(
+ services,
+ do,
+ operator.attrgetter('name'),
+ None,
+ get_deps,
+ )
+ if errors:
+ raise ProjectError(
+ 'Encountered errors while bringing up the project.'
+ )
+
+ return [
+ container
+ for svc_containers in results
+ if svc_containers is not None
+ for container in svc_containers
+ ]
+
+ def initialize(self):
+ self.networks.initialize()
+ self.volumes.initialize()
+
+ def _get_convergence_plans(self, services, strategy):
+ plans = {}
+
+ for service in services:
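+            # A dependency counts as updated only if it was planned earlier
+            # in this loop and its plan creates or recreates containers.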
+ updated_dependencies = [
+ name
+ for name in service.get_dependency_names()
+ if name in plans and
+ plans[name].action in ('recreate', 'create')
+ ]
+
+ if updated_dependencies and strategy.allows_recreate:
+ log.debug('%s has upstream changes (%s)',
+ service.name,
+ ", ".join(updated_dependencies))
+ plan = service.convergence_plan(ConvergenceStrategy.always)
+ else:
+ plan = service.convergence_plan(strategy)
+
+ plans[service.name] = plan
+
+ return plans
+
+ def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False):
+ services = self.get_services(service_names, include_deps=False)
+
+ if parallel_pull:
+ def pull_service(service):
+ service.pull(ignore_pull_failures, True)
+
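+            # `limit` caps the number of images pulled concurrently.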
+ _, errors = parallel.parallel_execute(
+ services,
+ pull_service,
+ operator.attrgetter('name'),
+ 'Pulling',
+ limit=5,
+ )
+ if len(errors):
+ raise ProjectError(b"\n".join(errors.values()))
+ else:
+ for service in services:
+ service.pull(ignore_pull_failures, silent=silent)
+
+ def push(self, service_names=None, ignore_push_failures=False):
+ for service in self.get_services(service_names, include_deps=False):
+ service.push(ignore_push_failures)
+
+ def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
+ return list(filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters={'label': self.labels(one_off=one_off)})])
+ )
+
+ def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
+ if service_names:
+ self.validate_service_names(service_names)
+ else:
+ service_names = self.service_names
+
+ containers = self._labeled_containers(stopped, one_off)
+
+ def matches_service_names(container):
+ return container.labels.get(LABEL_SERVICE) in service_names
+
+ return [c for c in containers if matches_service_names(c)]
+
+ def find_orphan_containers(self, remove_orphans):
+ def _find():
+ containers = self._labeled_containers()
+ for ctnr in containers:
+ service_name = ctnr.labels.get(LABEL_SERVICE)
+ if service_name not in self.service_names:
+ yield ctnr
+ orphans = list(_find())
+ if not orphans:
+ return
+ if remove_orphans:
+ for ctnr in orphans:
+ log.info('Removing orphan container "{0}"'.format(ctnr.name))
+ ctnr.kill()
+ ctnr.remove(force=True)
+ else:
+ log.warning(
+ 'Found orphan containers ({0}) for this project. If '
+ 'you removed or renamed this service in your compose '
+ 'file, you can run this command with the '
+ '--remove-orphans flag to clean it up.'.format(
+ ', '.join(["{}".format(ctnr.name) for ctnr in orphans])
+ )
+ )
+
+ def _inject_deps(self, acc, service):
+ dep_names = service.get_dependency_names()
+
+ if len(dep_names) > 0:
+ dep_services = self.get_services(
+ service_names=list(set(dep_names)),
+ include_deps=True
+ )
+ else:
+ dep_services = []
+
+ dep_services.append(service)
+ return acc + dep_services
+
+    def build_container_operation_with_timeout_func(self, operation, options):
+        def container_operation_with_timeout(container):
+            # Copy the shared options so one container's fallback timeout
+            # (derived from its service's stop_grace_period) doesn't leak
+            # into the timeout used for containers of other services.
+            op_options = dict(options)
+            if op_options.get('timeout') is None:
+                service = self.get_service(container.service)
+                op_options['timeout'] = service.stop_timeout(None)
+            return getattr(container, operation)(**op_options)
+        return container_operation_with_timeout
+
+
+def get_volumes_from(project, service_dict):
+ volumes_from = service_dict.pop('volumes_from', None)
+ if not volumes_from:
+ return []
+
+ def build_volume_from(spec):
+ if spec.type == 'service':
+ try:
+ return spec._replace(source=project.get_service(spec.source))
+ except NoSuchService:
+ pass
+
+ if spec.type == 'container':
+ try:
+ container = Container.from_id(project.client, spec.source)
+ return spec._replace(source=container)
+ except APIError:
+ pass
+
+ raise ConfigurationError(
+ "Service \"{}\" mounts volumes from \"{}\", which is not the name "
+ "of a service or container.".format(
+ service_dict['name'],
+ spec.source))
+
+ return [build_volume_from(vf) for vf in volumes_from]
+
+
+def get_secrets(service, service_secrets, secret_defs):
+ secrets = []
+
+ for secret in service_secrets:
+ secret_def = secret_defs.get(secret.source)
+ if not secret_def:
+ raise ConfigurationError(
+ "Service \"{service}\" uses an undefined secret \"{secret}\" "
+ .format(service=service, secret=secret.source))
+
+ if secret_def.get('external_name'):
+ log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
+ "External secrets are not available to containers created by "
+ "docker-compose.".format(service=service, secret=secret.source))
+ continue
+
+ if secret.uid or secret.gid or secret.mode:
+ log.warn(
+ "Service \"{service}\" uses secret \"{secret}\" with uid, "
+ "gid, or mode. These fields are not supported by this "
+ "implementation of the Compose file".format(
+ service=service, secret=secret.source
+ )
+ )
+
+ secrets.append({'secret': secret, 'file': secret_def.get('file')})
+
+ return secrets
+
+
+def warn_for_swarm_mode(client):
+ info = client.info()
+ if info.get('Swarm', {}).get('LocalNodeState') == 'active':
+ if info.get('ServerVersion', '').startswith('ucp'):
+ # UCP does multi-node scheduling with traditional Compose files.
+ return
+
+ log.warn(
+ "The Docker Engine you're using is running in swarm mode.\n\n"
+ "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
+ "All containers will be scheduled on the current node.\n\n"
+ "To deploy your application across the swarm, "
+ "use `docker stack deploy`.\n"
+ )
+
+
+class NoSuchService(Exception):
+ def __init__(self, name):
+ self.name = name
+ self.msg = "No such service: %s" % self.name
+
+ def __str__(self):
+ return self.msg
+
+
+class ProjectError(Exception):
+ def __init__(self, msg):
+ self.msg = msg
diff --git a/compose/service.py b/compose/service.py
new file mode 100644
index 00000000..1a18c665
--- /dev/null
+++ b/compose/service.py
@@ -0,0 +1,1428 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+import re
+import sys
+from collections import namedtuple
+from operator import attrgetter
+
+import enum
+import six
+from docker.errors import APIError
+from docker.errors import ImageNotFound
+from docker.errors import NotFound
+from docker.types import LogConfig
+from docker.utils.ports import build_port_bindings
+from docker.utils.ports import split_port
+from docker.utils.utils import convert_tmpfs_mounts
+
+from . import __version__
+from . import const
+from . import progress_stream
+from .config import DOCKER_CONFIG_KEYS
+from .config import merge_environment
+from .config.errors import DependencyError
+from .config.types import ServicePort
+from .config.types import VolumeSpec
+from .const import DEFAULT_TIMEOUT
+from .const import IS_WINDOWS_PLATFORM
+from .const import LABEL_CONFIG_HASH
+from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_ONE_OFF
+from .const import LABEL_PROJECT
+from .const import LABEL_SERVICE
+from .const import LABEL_VERSION
+from .const import NANOCPUS_SCALE
+from .container import Container
+from .errors import HealthCheckFailed
+from .errors import NoHealthCheckConfigured
+from .errors import OperationFailedError
+from .parallel import parallel_execute
+from .progress_stream import stream_output
+from .progress_stream import StreamOutputError
+from .utils import json_hash
+from .utils import parse_bytes
+from .utils import parse_seconds_float
+
+
+log = logging.getLogger(__name__)
+
+
+HOST_CONFIG_KEYS = [
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'cpu_count',
+ 'cpu_percent',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpus',
+ 'cpuset',
+ 'devices',
+ 'dns',
+ 'dns_search',
+ 'dns_opt',
+ 'env_file',
+ 'extra_hosts',
+ 'group_add',
+ 'init',
+ 'ipc',
+ 'read_only',
+ 'log_driver',
+ 'log_opt',
+ 'mem_limit',
+ 'mem_reservation',
+ 'memswap_limit',
+ 'mem_swappiness',
+ 'oom_score_adj',
+ 'pid',
+ 'pids_limit',
+ 'privileged',
+ 'restart',
+ 'security_opt',
+ 'shm_size',
+ 'storage_opt',
+ 'sysctls',
+ 'userns_mode',
+ 'volumes_from',
+ 'volume_driver',
+]
+
+CONDITION_STARTED = 'service_started'
+CONDITION_HEALTHY = 'service_healthy'
+
+
+class BuildError(Exception):
+ def __init__(self, service, reason):
+ self.service = service
+ self.reason = reason
+
+
+class NeedsBuildError(Exception):
+ def __init__(self, service):
+ self.service = service
+
+
+class NoSuchImageError(Exception):
+ pass
+
+
+ServiceName = namedtuple('ServiceName', 'project service number')
+
+
+ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
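+# `action` is one of 'create', 'recreate', 'start' or 'noop'; `containers`
+# holds the existing containers the plan applies to.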
+
+
+@enum.unique
+class ConvergenceStrategy(enum.Enum):
+ """Enumeration for all possible convergence strategies. Values refer to
+ when containers should be recreated.
+ """
+ changed = 1
+ always = 2
+ never = 3
+
+ @property
+ def allows_recreate(self):
+ return self is not type(self).never
+
+
+@enum.unique
+class ImageType(enum.Enum):
+ """Enumeration for the types of images known to compose."""
+ none = 0
+ local = 1
+ all = 2
+
+
+@enum.unique
+class BuildAction(enum.Enum):
+ """Enumeration for the possible build actions."""
+ none = 0
+ force = 1
+ skip = 2
+
+
+class Service(object):
+ def __init__(
+ self,
+ name,
+ client=None,
+ project='default',
+ use_networking=False,
+ links=None,
+ volumes_from=None,
+ network_mode=None,
+ networks=None,
+ secrets=None,
+ scale=None,
+ pid_mode=None,
+ **options
+ ):
+ self.name = name
+ self.client = client
+ self.project = project
+ self.use_networking = use_networking
+ self.links = links or []
+ self.volumes_from = volumes_from or []
+ self.network_mode = network_mode or NetworkMode(None)
+ self.pid_mode = pid_mode or PidMode(None)
+ self.networks = networks or {}
+ self.secrets = secrets or []
+ self.scale_num = scale or 1
+ self.options = options
+
+ def __repr__(self):
+ return '<Service: {}>'.format(self.name)
+
+    def containers(self, stopped=False, one_off=False, filters=None):
+        # Copy the caller's filters (if any) so adding the label filter
+        # doesn't mutate a shared dict.
+        filters = dict(filters or {})
+        filters.update({'label': self.labels(one_off=one_off)})
+
+ return list(filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters=filters)]))
+
+ def get_container(self, number=1):
+ """Return a :class:`compose.container.Container` for this service. The
+ container must be active, and match `number`.
+ """
+ labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
+ for container in self.client.containers(filters={'label': labels}):
+ return Container.from_ps(self.client, container)
+
+ raise ValueError("No container found for %s_%s" % (self.name, number))
+
+ def start(self, **options):
+ containers = self.containers(stopped=True)
+ for c in containers:
+ self.start_container_if_stopped(c, **options)
+ return containers
+
+ def show_scale_warnings(self, desired_num):
+ if self.custom_container_name and desired_num > 1:
+ log.warn('The "%s" service is using the custom container name "%s". '
+ 'Docker requires each container to have a unique name. '
+ 'Remove the custom name to scale the service.'
+ % (self.name, self.custom_container_name))
+
+ if self.specifies_host_port() and desired_num > 1:
+ log.warn('The "%s" service specifies a port on the host. If multiple containers '
+ 'for this service are created on a single host, the port will clash.'
+ % self.name)
+
+ def scale(self, desired_num, timeout=None):
+ """
+ Adjusts the number of containers to the specified number and ensures
+ they are running.
+
+ - creates containers until there are at least `desired_num`
+ - stops containers until there are at most `desired_num` running
+ - starts containers until there are at least `desired_num` running
+ - removes all stopped containers
+ """
+
+ self.show_scale_warnings(desired_num)
+
+ running_containers = self.containers(stopped=False)
+ num_running = len(running_containers)
+
+ if desired_num == num_running:
+ # do nothing as we already have the desired number
+ log.info('Desired container number already achieved')
+ return
+
+ if desired_num > num_running:
+ all_containers = self.containers(stopped=True)
+
+ if num_running != len(all_containers):
+ # we have some stopped containers, check for divergences
+ stopped_containers = [
+ c for c in all_containers if not c.is_running
+ ]
+
+ # Remove containers that have diverged
+ divergent_containers = [
+ c for c in stopped_containers if self._containers_have_diverged([c])
+ ]
+ for c in divergent_containers:
+ c.remove()
+
+ all_containers = list(set(all_containers) - set(divergent_containers))
+
+ sorted_containers = sorted(all_containers, key=attrgetter('number'))
+ self._execute_convergence_start(
+ sorted_containers, desired_num, timeout, True, True
+ )
+
+ if desired_num < num_running:
+ num_to_stop = num_running - desired_num
+
+ sorted_running_containers = sorted(
+ running_containers,
+ key=attrgetter('number'))
+
+ self._downscale(sorted_running_containers[-num_to_stop:], timeout)
+
+ def create_container(self,
+ one_off=False,
+ previous_container=None,
+ number=None,
+ quiet=False,
+ **override_options):
+ """
+ Create a container for this service. If the image doesn't exist, attempt to pull
+ it.
+ """
+        # This is only necessary when `scale` or `volumes_from`
+        # auto-creates containers to satisfy a dependency.
+ self.ensure_image_exists()
+
+ container_options = self._get_container_create_options(
+ override_options,
+ number or self._next_container_number(one_off=one_off),
+ one_off=one_off,
+ previous_container=previous_container,
+ )
+
+ if 'name' in container_options and not quiet:
+ log.info("Creating %s" % container_options['name'])
+
+ try:
+ return Container.create(self.client, **container_options)
+ except APIError as ex:
+ raise OperationFailedError("Cannot create container for service %s: %s" %
+ (self.name, ex.explanation))
+
+ def ensure_image_exists(self, do_build=BuildAction.none):
+ if self.can_be_built() and do_build == BuildAction.force:
+ self.build()
+ return
+
+ try:
+ self.image()
+ return
+ except NoSuchImageError:
+ pass
+
+ if not self.can_be_built():
+ self.pull()
+ return
+
+ if do_build == BuildAction.skip:
+ raise NeedsBuildError(self)
+
+ self.build()
+ log.warn(
+ "Image for service {} was built because it did not already exist. To "
+ "rebuild this image you must use `docker-compose build` or "
+ "`docker-compose up --build`.".format(self.name))
+
+ def image(self):
+ try:
+ return self.client.inspect_image(self.image_name)
+ except ImageNotFound:
+ raise NoSuchImageError("Image '{}' not found".format(self.image_name))
+
+ @property
+ def image_name(self):
+ return self.options.get('image', '{s.project}_{s.name}'.format(s=self))
+
+ def convergence_plan(self, strategy=ConvergenceStrategy.changed):
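+        # Decide what to do with existing containers: create if there are
+        # none, recreate on config divergence (or when forced), start any
+        # stopped ones, and otherwise leave everything as-is ('noop').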
+ containers = self.containers(stopped=True)
+
+ if not containers:
+ return ConvergencePlan('create', [])
+
+ if strategy is ConvergenceStrategy.never:
+ return ConvergencePlan('start', containers)
+
+ if (
+ strategy is ConvergenceStrategy.always or
+ self._containers_have_diverged(containers)
+ ):
+ return ConvergencePlan('recreate', containers)
+
+ stopped = [c for c in containers if not c.is_running]
+
+ if stopped:
+ return ConvergencePlan('start', stopped)
+
+ return ConvergencePlan('noop', containers)
+
+ def _containers_have_diverged(self, containers):
+ config_hash = None
+
+ try:
+ config_hash = self.config_hash
+ except NoSuchImageError as e:
+ log.debug(
+ 'Service %s has diverged: %s',
+ self.name, six.text_type(e),
+ )
+ return True
+
+ has_diverged = False
+
+ for c in containers:
+ container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
+ if container_config_hash != config_hash:
+ log.debug(
+ '%s has diverged: %s != %s',
+ c.name, container_config_hash, config_hash,
+ )
+ has_diverged = True
+
+ return has_diverged
+
+ def _execute_convergence_create(self, scale, detached, start):
+ i = self._next_container_number()
+
+ def create_and_start(service, n):
+ container = service.create_container(number=n)
+ if not detached:
+ container.attach_log_stream()
+ if start:
+ self.start_container(container)
+ return container
+
+ containers, errors = parallel_execute(
+ range(i, i + scale),
+ lambda n: create_and_start(self, n),
+ lambda n: self.get_container_name(n),
+ "Creating",
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ return containers
+
+ def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
+
+ def recreate(container):
+ return self.recreate_container(
+ container, timeout=timeout, attach_logs=not detached,
+ start_new_container=start
+ )
+ containers, errors = parallel_execute(
+ containers,
+ recreate,
+ lambda c: c.name,
+ "Recreating",
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
+
+ def _execute_convergence_start(self, containers, scale, timeout, detached, start):
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
+ if start:
+ _, errors = parallel_execute(
+ containers,
+ lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
+ lambda c: c.name,
+ "Starting",
+ )
+
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
+
+ def _downscale(self, containers, timeout=None):
+ def stop_and_remove(container):
+ container.stop(timeout=self.stop_timeout(timeout))
+ container.remove()
+
+ parallel_execute(
+ containers,
+ stop_and_remove,
+ lambda c: c.name,
+ "Stopping and removing",
+ )
+
+ def execute_convergence_plan(self, plan, timeout=None, detached=False,
+ start=True, scale_override=None, rescale=True):
+ (action, containers) = plan
+ scale = scale_override if scale_override is not None else self.scale_num
+ containers = sorted(containers, key=attrgetter('number'))
+
+ self.show_scale_warnings(scale)
+
+ if action == 'create':
+ return self._execute_convergence_create(
+ scale, detached, start
+ )
+
+        # The create action always needs an initial scale; otherwise, scale
+        # is set to None in no-rescale scenarios (`run` dependencies).
+ if not rescale:
+ scale = None
+
+ if action == 'recreate':
+ return self._execute_convergence_recreate(
+ containers, scale, timeout, detached, start
+ )
+
+ if action == 'start':
+ return self._execute_convergence_start(
+ containers, scale, timeout, detached, start
+ )
+
+ if action == 'noop':
+ if scale != len(containers):
+ return self._execute_convergence_start(
+ containers, scale, timeout, detached, start
+ )
+ for c in containers:
+ log.info("%s is up-to-date" % c.name)
+
+ return containers
+
+ raise Exception("Invalid action: {}".format(action))
+
+ def recreate_container(
+ self,
+ container,
+ timeout=None,
+ attach_logs=False,
+ start_new_container=True):
+ """Recreate a container.
+
+        The original container is renamed to a temporary name so that its
+        data volumes can be copied to the new container before the original
+        is removed.
+ """
+ log.info("Recreating %s" % container.name)
+
+ container.stop(timeout=self.stop_timeout(timeout))
+ container.rename_to_tmp_name()
+ new_container = self.create_container(
+ previous_container=container,
+ number=container.labels.get(LABEL_CONTAINER_NUMBER),
+ quiet=True,
+ )
+ if attach_logs:
+ new_container.attach_log_stream()
+ if start_new_container:
+ self.start_container(new_container)
+ container.remove()
+ return new_container
+
+ def stop_timeout(self, timeout):
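+        # Precedence: explicit timeout argument, then the service's
+        # stop_grace_period, then the global default.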
+ if timeout is not None:
+ return timeout
+ timeout = parse_seconds_float(self.options.get('stop_grace_period'))
+ if timeout is not None:
+ return timeout
+ return DEFAULT_TIMEOUT
+
+ def start_container_if_stopped(self, container, attach_logs=False, quiet=False):
+ if not container.is_running:
+ if not quiet:
+ log.info("Starting %s" % container.name)
+ if attach_logs:
+ container.attach_log_stream()
+ return self.start_container(container)
+
+ def start_container(self, container):
+ self.connect_container_to_networks(container)
+ try:
+ container.start()
+ except APIError as ex:
+ raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
+ return container
+
+ def connect_container_to_networks(self, container):
+ connected_networks = container.get('NetworkSettings.Networks')
+
+ for network, netdefs in self.networks.items():
+ if network in connected_networks:
+ if short_id_alias_exists(container, network):
+ continue
+
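+                # Attached but without its short-id alias (i.e. connected
+                # at create time): detach and re-attach below with the
+                # full alias and IP configuration.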
+ self.client.disconnect_container_from_network(
+ container.id,
+ network)
+
+ self.client.connect_container_to_network(
+ container.id, network,
+ aliases=self._get_aliases(netdefs, container),
+ ipv4_address=netdefs.get('ipv4_address', None),
+ ipv6_address=netdefs.get('ipv6_address', None),
+ links=self._get_links(False),
+ link_local_ips=netdefs.get('link_local_ips', None),
+ )
+
+ def remove_duplicate_containers(self, timeout=None):
+ for c in self.duplicate_containers():
+ log.info('Removing %s' % c.name)
+ c.stop(timeout=self.stop_timeout(timeout))
+ c.remove()
+
+ def duplicate_containers(self):
+ containers = sorted(
+ self.containers(stopped=True),
+ key=lambda c: c.get('Created'),
+ )
+
+ numbers = set()
+
+ for c in containers:
+ if c.number in numbers:
+ yield c
+ else:
+ numbers.add(c.number)
+
+ @property
+ def config_hash(self):
+ return json_hash(self.config_dict())
+
+ def config_dict(self):
+ return {
+ 'options': self.options,
+ 'image_id': self.image()['Id'],
+ 'links': self.get_link_names(),
+ 'net': self.network_mode.id,
+ 'networks': self.networks,
+ 'volumes_from': [
+ (v.source.name, v.mode)
+ for v in self.volumes_from if isinstance(v.source, Service)
+ ],
+ }
+
+ def get_dependency_names(self):
+ net_name = self.network_mode.service_name
+ pid_namespace = self.pid_mode.service_name
+ return (
+ self.get_linked_service_names() +
+ self.get_volumes_from_names() +
+ ([net_name] if net_name else []) +
+ ([pid_namespace] if pid_namespace else []) +
+ list(self.options.get('depends_on', {}).keys())
+ )
+
+ def get_dependency_configs(self):
+ net_name = self.network_mode.service_name
+ pid_namespace = self.pid_mode.service_name
+
+ configs = dict(
+ [(name, None) for name in self.get_linked_service_names()]
+ )
+ configs.update(dict(
+ [(name, None) for name in self.get_volumes_from_names()]
+ ))
+ configs.update({net_name: None} if net_name else {})
+ configs.update({pid_namespace: None} if pid_namespace else {})
+ configs.update(self.options.get('depends_on', {}))
+ for svc, config in self.options.get('depends_on', {}).items():
+ if config['condition'] == CONDITION_STARTED:
+ configs[svc] = lambda s: True
+ elif config['condition'] == CONDITION_HEALTHY:
+ configs[svc] = lambda s: s.is_healthy()
+ else:
+ # The config schema already prevents this, but it might be
+ # bypassed if Compose is called programmatically.
+ raise ValueError(
+ 'depends_on condition "{}" is invalid.'.format(
+ config['condition']
+ )
+ )
+
+ return configs
+
+ def get_linked_service_names(self):
+ return [service.name for (service, _) in self.links]
+
+ def get_link_names(self):
+ return [(service.name, alias) for service, alias in self.links]
+
+ def get_volumes_from_names(self):
+ return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]
+
+ # TODO: this would benefit from github.com/docker/docker/pull/14699
+ # to remove the need to inspect every container
+ def _next_container_number(self, one_off=False):
+ containers = filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=True,
+ filters={'label': self.labels(one_off=one_off)})
+ ])
+ numbers = [c.number for c in containers]
+ return 1 if not numbers else max(numbers) + 1
+
+ def _get_aliases(self, network, container=None):
+ if container and container.labels.get(LABEL_ONE_OFF) == "True":
+ return []
+
+ return list(
+ {self.name} |
+ ({container.short_id} if container else set()) |
+ set(network.get('aliases', ()))
+ )
+
+ def build_default_networking_config(self):
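+        # A container can only be created attached to a single network;
+        # any remaining networks are connected after creation in
+        # connect_container_to_networks().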
+ if not self.networks:
+ return {}
+
+ network = self.networks[self.network_mode.id]
+ endpoint = {
+ 'Aliases': self._get_aliases(network),
+ 'IPAMConfig': {},
+ }
+
+ if network.get('ipv4_address'):
+ endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address')
+ if network.get('ipv6_address'):
+ endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address')
+
+ return {"EndpointsConfig": {self.network_mode.id: endpoint}}
+
+ def _get_links(self, link_to_self):
+ links = {}
+
+ for service, link_name in self.links:
+ for container in service.containers():
+ links[link_name or service.name] = container.name
+ links[container.name] = container.name
+ links[container.name_without_project] = container.name
+
+ if link_to_self:
+ for container in self.containers():
+ links[self.name] = container.name
+ links[container.name] = container.name
+ links[container.name_without_project] = container.name
+
+ for external_link in self.options.get('external_links') or []:
+ if ':' not in external_link:
+ link_name = external_link
+ else:
+ external_link, link_name = external_link.split(':')
+ links[link_name] = external_link
+
+ return [
+ (alias, container_name)
+ for (container_name, alias) in links.items()
+ ]
+
+ def _get_volumes_from(self):
+ return [build_volume_from(spec) for spec in self.volumes_from]
+
+ def _get_container_create_options(
+ self,
+ override_options,
+ number,
+ one_off=False,
+ previous_container=None):
+ add_config_hash = (not one_off and not override_options)
+
+ container_options = dict(
+ (k, self.options[k])
+ for k in DOCKER_CONFIG_KEYS if k in self.options)
+ override_volumes = override_options.pop('volumes', [])
+ container_options.update(override_options)
+
+ if not container_options.get('name'):
+ container_options['name'] = self.get_container_name(number, one_off)
+
+ container_options.setdefault('detach', True)
+
+ # If a qualified hostname was given, split it into an
+ # unqualified hostname and a domainname unless domainname
+ # was also given explicitly. This matches the behavior of
+ # the official Docker CLI in that scenario.
+ if ('hostname' in container_options and
+ 'domainname' not in container_options and
+ '.' in container_options['hostname']):
+ parts = container_options['hostname'].partition('.')
+ container_options['hostname'] = parts[0]
+ container_options['domainname'] = parts[2]
+
+ if 'ports' in container_options or 'expose' in self.options:
+ container_options['ports'] = build_container_ports(
+ formatted_ports(container_options.get('ports', [])),
+ self.options)
+
+ if 'volumes' in container_options or override_volumes:
+ container_options['volumes'] = list(set(
+ container_options.get('volumes', []) + override_volumes
+ ))
+
+ container_options['environment'] = merge_environment(
+ self.options.get('environment'),
+ override_options.get('environment'))
+
+ binds, affinity = merge_volume_bindings(
+ container_options.get('volumes') or [],
+ self.options.get('tmpfs') or [],
+ previous_container)
+ override_options['binds'] = binds
+ container_options['environment'].update(affinity)
+
+ container_options['volumes'] = dict(
+ (v.internal, {}) for v in container_options.get('volumes') or {})
+
+ secret_volumes = self.get_secret_volumes()
+ if secret_volumes:
+ override_options['binds'].extend(v.repr() for v in secret_volumes)
+ container_options['volumes'].update(
+ (v.internal, {}) for v in secret_volumes)
+
+ container_options['image'] = self.image_name
+
+ container_options['labels'] = build_container_labels(
+ container_options.get('labels', {}),
+ self.labels(one_off=one_off),
+ number,
+ self.config_hash if add_config_hash else None)
+
+ # Delete options which are only used in HostConfig
+ for key in HOST_CONFIG_KEYS:
+ container_options.pop(key, None)
+
+ container_options['host_config'] = self._get_container_host_config(
+ override_options,
+ one_off=one_off)
+
+ networking_config = self.build_default_networking_config()
+ if networking_config:
+ container_options['networking_config'] = networking_config
+
+ container_options['environment'] = format_environment(
+ container_options['environment'])
+ return container_options
+
+ def _get_container_host_config(self, override_options, one_off=False):
+ options = dict(self.options, **override_options)
+
+ logging_dict = options.get('logging', None)
+ blkio_config = convert_blkio_config(options.get('blkio_config', None))
+ log_config = get_log_config(logging_dict)
+ init_path = None
+ if isinstance(options.get('init'), six.string_types):
+ init_path = options.get('init')
+ options['init'] = True
+
+ nano_cpus = None
+ if 'cpus' in options:
+ nano_cpus = int(options.get('cpus') * NANOCPUS_SCALE)
+
+ return self.client.create_host_config(
+ links=self._get_links(link_to_self=one_off),
+ port_bindings=build_port_bindings(
+ formatted_ports(options.get('ports', []))
+ ),
+ binds=options.get('binds'),
+ volumes_from=self._get_volumes_from(),
+ privileged=options.get('privileged', False),
+ network_mode=self.network_mode.mode,
+ devices=options.get('devices'),
+ dns=options.get('dns'),
+ dns_opt=options.get('dns_opt'),
+ dns_search=options.get('dns_search'),
+ restart_policy=options.get('restart'),
+ cap_add=options.get('cap_add'),
+ cap_drop=options.get('cap_drop'),
+ mem_limit=options.get('mem_limit'),
+ mem_reservation=options.get('mem_reservation'),
+ memswap_limit=options.get('memswap_limit'),
+ ulimits=build_ulimits(options.get('ulimits')),
+ log_config=log_config,
+ extra_hosts=options.get('extra_hosts'),
+ read_only=options.get('read_only'),
+ pid_mode=self.pid_mode.mode,
+ security_opt=options.get('security_opt'),
+ ipc_mode=options.get('ipc'),
+ cgroup_parent=options.get('cgroup_parent'),
+ cpu_quota=options.get('cpu_quota'),
+ shm_size=options.get('shm_size'),
+ sysctls=options.get('sysctls'),
+ pids_limit=options.get('pids_limit'),
+ tmpfs=options.get('tmpfs'),
+ oom_score_adj=options.get('oom_score_adj'),
+ mem_swappiness=options.get('mem_swappiness'),
+ group_add=options.get('group_add'),
+ userns_mode=options.get('userns_mode'),
+ init=options.get('init', None),
+ init_path=init_path,
+ isolation=options.get('isolation'),
+ cpu_count=options.get('cpu_count'),
+ cpu_percent=options.get('cpu_percent'),
+ nano_cpus=nano_cpus,
+ volume_driver=options.get('volume_driver'),
+ cpuset_cpus=options.get('cpuset'),
+ cpu_shares=options.get('cpu_shares'),
+ storage_opt=options.get('storage_opt'),
+ blkio_weight=blkio_config.get('weight'),
+ blkio_weight_device=blkio_config.get('weight_device'),
+ device_read_bps=blkio_config.get('device_read_bps'),
+ device_read_iops=blkio_config.get('device_read_iops'),
+ device_write_bps=blkio_config.get('device_write_bps'),
+ device_write_iops=blkio_config.get('device_write_iops'),
+ )
+
+ def get_secret_volumes(self):
+ def build_spec(secret):
+ target = secret['secret'].target
+ if target is None:
+ target = '{}/{}'.format(const.SECRETS_PATH, secret['secret'].source)
+ elif not os.path.isabs(target):
+ target = '{}/{}'.format(const.SECRETS_PATH, target)
+
+ return VolumeSpec(secret['file'], target, 'ro')
+
+ return [build_spec(secret) for secret in self.secrets]
+
+ def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
+ log.info('Building %s' % self.name)
+
+ build_opts = self.options.get('build', {})
+
+ build_args = build_opts.get('args', {}).copy()
+ if build_args_override:
+ build_args.update(build_args_override)
+
+        # python2 os.stat() doesn't support unicode on some UNIX systems,
+        # so we encode the path to a bytestring to be safe
+ path = build_opts.get('context')
+ if not six.PY3 and not IS_WINDOWS_PLATFORM:
+ path = path.encode('utf8')
+
+ build_output = self.client.build(
+ path=path,
+ tag=self.image_name,
+ stream=True,
+ rm=True,
+ forcerm=force_rm,
+ pull=pull,
+ nocache=no_cache,
+ dockerfile=build_opts.get('dockerfile', None),
+ cache_from=build_opts.get('cache_from', None),
+ labels=build_opts.get('labels', None),
+ buildargs=build_args,
+ network_mode=build_opts.get('network', None),
+ target=build_opts.get('target', None),
+ shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
+ )
+
+ try:
+ all_events = stream_output(build_output, sys.stdout)
+ except StreamOutputError as e:
+ raise BuildError(self, six.text_type(e))
+
+ # Ensure the HTTP connection is not reused for another
+ # streaming command, as the Docker daemon can sometimes
+ # complain about it
+ self.client.close()
+
+ image_id = None
+
+ for event in all_events:
+ if 'stream' in event:
+ match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
+ if match:
+ image_id = match.group(1)
+
+ if image_id is None:
+ raise BuildError(self, event if all_events else 'Unknown')
+
+ return image_id
+
+ def can_be_built(self):
+ return 'build' in self.options
+
+ def labels(self, one_off=False):
+ return [
+ '{0}={1}'.format(LABEL_PROJECT, self.project),
+ '{0}={1}'.format(LABEL_SERVICE, self.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
+ ]
+
+ @property
+ def custom_container_name(self):
+ return self.options.get('container_name')
+
+ def get_container_name(self, number, one_off=False):
+ if self.custom_container_name and not one_off:
+ return self.custom_container_name
+
+ container_name = build_container_name(
+ self.project, self.name, number, one_off,
+ )
+ ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
+ if container_name in ext_links_origins:
+ raise DependencyError(
+ 'Service {0} has a self-referential external link: {1}'.format(
+ self.name, container_name
+ )
+ )
+ return container_name
+
+ def remove_image(self, image_type):
+ if not image_type or image_type == ImageType.none:
+ return False
+ if image_type == ImageType.local and self.options.get('image'):
+ return False
+
+ log.info("Removing image %s", self.image_name)
+ try:
+ self.client.remove_image(self.image_name)
+ return True
+ except APIError as e:
+ log.error("Failed to remove image for service %s: %s", self.name, e)
+ return False
+
+ def specifies_host_port(self):
+ def has_host_port(binding):
+ if isinstance(binding, dict):
+ external_bindings = binding.get('published')
+ else:
+ _, external_bindings = split_port(binding)
+
+ # there are no external bindings
+ if external_bindings is None:
+ return False
+
+ # we only need to check the first binding from the range
+ external_binding = external_bindings[0]
+
+ # non-tuple binding means there is a host port specified
+ if not isinstance(external_binding, tuple):
+ return True
+
+ # extract actual host port from tuple of (host_ip, host_port)
+ _, host_port = external_binding
+ if host_port is not None:
+ return True
+
+ return False
+
+ return any(has_host_port(binding) for binding in self.options.get('ports', []))
+
+ def pull(self, ignore_pull_failures=False, silent=False):
+ if 'image' not in self.options:
+ return
+
+ repo, tag, separator = parse_repository_tag(self.options['image'])
+ tag = tag or 'latest'
+ if not silent:
+ log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
+ try:
+ output = self.client.pull(repo, tag=tag, stream=True)
+ if silent:
+ with open(os.devnull, 'w') as devnull:
+ return progress_stream.get_digest_from_pull(
+ stream_output(output, devnull))
+ else:
+ return progress_stream.get_digest_from_pull(
+ stream_output(output, sys.stdout))
+ except (StreamOutputError, NotFound) as e:
+ if not ignore_pull_failures:
+ raise
+ else:
+ log.error(six.text_type(e))
+
+ def push(self, ignore_push_failures=False):
+ if 'image' not in self.options or 'build' not in self.options:
+ return
+
+ repo, tag, separator = parse_repository_tag(self.options['image'])
+ tag = tag or 'latest'
+ log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
+ output = self.client.push(repo, tag=tag, stream=True)
+
+ try:
+ return progress_stream.get_digest_from_push(
+ stream_output(output, sys.stdout))
+ except StreamOutputError as e:
+ if not ignore_push_failures:
+ raise
+ else:
+ log.error(six.text_type(e))
+
+    def is_healthy(self):
+        """Check that all containers for this service report healthy.
+        Return False if at least one healthcheck is still pending.
+        Raise HealthCheckFailed if an unhealthy container is detected, and
+        NoHealthCheckConfigured if a container defines no healthcheck.
+        """
+ result = True
+ for ctnr in self.containers():
+ ctnr.inspect()
+ status = ctnr.get('State.Health.Status')
+ if status is None:
+ raise NoHealthCheckConfigured(self.name)
+ elif status == 'starting':
+ result = False
+ elif status == 'unhealthy':
+ raise HealthCheckFailed(ctnr.short_id)
+ return result
+
+
+def short_id_alias_exists(container, network):
+ aliases = container.get(
+ 'NetworkSettings.Networks.{net}.Aliases'.format(net=network)) or ()
+ return container.short_id in aliases
+
+
+class PidMode(object):
+ def __init__(self, mode):
+ self._mode = mode
+
+ @property
+ def mode(self):
+ return self._mode
+
+ @property
+ def service_name(self):
+ return None
+
+
+class ServicePidMode(PidMode):
+ def __init__(self, service):
+ self.service = service
+
+ @property
+ def service_name(self):
+ return self.service.name
+
+ @property
+ def mode(self):
+ containers = self.service.containers()
+ if containers:
+ return 'container:' + containers[0].id
+
+ log.warn(
+ "Service %s is trying to use reuse the PID namespace "
+ "of another service that is not running." % (self.service_name)
+ )
+ return None
+
+
+class ContainerPidMode(PidMode):
+ def __init__(self, container):
+ self.container = container
+ self._mode = 'container:{}'.format(container.id)
+
+
+class NetworkMode(object):
+ """A `standard` network mode (ex: host, bridge)"""
+
+ service_name = None
+
+ def __init__(self, network_mode):
+ self.network_mode = network_mode
+
+ @property
+ def id(self):
+ return self.network_mode
+
+ mode = id
+
+
+class ContainerNetworkMode(object):
+ """A network mode that uses a container's network stack."""
+
+ service_name = None
+
+ def __init__(self, container):
+ self.container = container
+
+ @property
+ def id(self):
+ return self.container.id
+
+ @property
+ def mode(self):
+ return 'container:' + self.container.id
+
+
+class ServiceNetworkMode(object):
+ """A network mode that uses a service's network stack."""
+
+ def __init__(self, service):
+ self.service = service
+
+ @property
+ def id(self):
+ return self.service.name
+
+ service_name = id
+
+ @property
+ def mode(self):
+ containers = self.service.containers()
+ if containers:
+ return 'container:' + containers[0].id
+
+ log.warn("Service %s is trying to use reuse the network stack "
+ "of another service that is not running." % (self.id))
+ return None
+
+
+# Names
+
+
+def build_container_name(project, service, number, one_off=False):
+ bits = [project, service]
+ if one_off:
+ bits.append('run')
+ return '_'.join(bits + [str(number)])
+
+
+# Images
+
+def parse_repository_tag(repo_path):
+ """Splits image identification into base image path, tag/digest
+ and it's separator.
+
+ Example:
+
+ >>> parse_repository_tag('user/repo@sha256:digest')
+ ('user/repo', 'sha256:digest', '@')
+ >>> parse_repository_tag('user/repo:v1')
+ ('user/repo', 'v1', ':')
+ """
+ tag_separator = ":"
+ digest_separator = "@"
+
+ if digest_separator in repo_path:
+ repo, tag = repo_path.rsplit(digest_separator, 1)
+ return repo, tag, digest_separator
+
+ repo, tag = repo_path, ""
+ if tag_separator in repo_path:
+ repo, tag = repo_path.rsplit(tag_separator, 1)
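+        # A '/' after the last ':' means the colon belongs to a registry
+        # host:port (e.g. localhost:5000/repo), not a tag.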
+ if "/" in tag:
+ repo, tag = repo_path, ""
+
+ return repo, tag, tag_separator
+
+
+# Volumes
+
+
+def merge_volume_bindings(volumes, tmpfs, previous_container):
+ """Return a list of volume bindings for a container. Container data volumes
+ are replaced by those from the previous container.
+ """
+ affinity = {}
+
+ volume_bindings = dict(
+ build_volume_binding(volume)
+ for volume in volumes
+ if volume.external)
+
+ if previous_container:
+ old_volumes = get_container_data_volumes(previous_container, volumes, tmpfs)
+ warn_on_masked_volume(volumes, old_volumes, previous_container.service)
+ volume_bindings.update(
+ build_volume_binding(volume) for volume in old_volumes)
+
+ if old_volumes:
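+            # Classic Swarm scheduling hint: keep the new container on the
+            # same node as the old one so its data volumes stay reachable.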
+ affinity = {'affinity:container': '=' + previous_container.id}
+
+ return list(volume_bindings.values()), affinity
+
+
+def get_container_data_volumes(container, volumes_option, tmpfs_option):
+ """Find the container data volumes that are in `volumes_option`, and return
+ a mapping of volume bindings for those volumes.
+ """
+ volumes = []
+ volumes_option = volumes_option or []
+
+ container_mounts = dict(
+ (mount['Destination'], mount)
+ for mount in container.get('Mounts') or {}
+ )
+
+ image_volumes = [
+ VolumeSpec.parse(volume)
+ for volume in
+ container.image_config['ContainerConfig'].get('Volumes') or {}
+ ]
+
+ for volume in set(volumes_option + image_volumes):
+ # No need to preserve host volumes
+ if volume.external:
+ continue
+
+ # Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751
+ if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys():
+ continue
+
+ mount = container_mounts.get(volume.internal)
+
+ # New volume, doesn't exist in the old container
+ if not mount:
+ continue
+
+ # Volume was previously a host volume, now it's a container volume
+ if not mount.get('Name'):
+ continue
+
+ # Copy existing volume from old container
+ volume = volume._replace(external=mount['Name'])
+ volumes.append(volume)
+
+ return volumes
+
+
+def warn_on_masked_volume(volumes_option, container_volumes, service):
+ container_volumes = dict(
+ (volume.internal, volume.external)
+ for volume in container_volumes)
+
+ for volume in volumes_option:
+ if (
+ volume.external and
+ volume.internal in container_volumes and
+ container_volumes.get(volume.internal) != volume.external
+ ):
+ log.warn((
+ "Service \"{service}\" is using volume \"{volume}\" from the "
+ "previous container. Host mapping \"{host_path}\" has no effect. "
+ "Remove the existing containers (with `docker-compose rm {service}`) "
+ "to use the host volume mapping."
+ ).format(
+ service=service,
+ volume=volume.internal,
+ host_path=volume.external))
+
+
+def build_volume_binding(volume_spec):
+ return volume_spec.internal, volume_spec.repr()
+
+
+def build_volume_from(volume_from_spec):
+ """
+    volume_from can be either a service or a container. Either way it is
+    resolved to a container id and formatted as "<id>:<mode>".
+ """
+ if isinstance(volume_from_spec.source, Service):
+ containers = volume_from_spec.source.containers(stopped=True)
+ if not containers:
+ return "{}:{}".format(
+ volume_from_spec.source.create_container().id,
+ volume_from_spec.mode)
+
+ container = containers[0]
+ return "{}:{}".format(container.id, volume_from_spec.mode)
+ elif isinstance(volume_from_spec.source, Container):
+ return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
+
+
+# Labels
+
+
+def build_container_labels(label_options, service_labels, number, config_hash):
+ labels = dict(label_options or {})
+ labels.update(label.split('=', 1) for label in service_labels)
+ labels[LABEL_CONTAINER_NUMBER] = str(number)
+ labels[LABEL_VERSION] = __version__
+
+ if config_hash:
+ log.debug("Added config hash: %s" % config_hash)
+ labels[LABEL_CONFIG_HASH] = config_hash
+
+ return labels
+
+
+# Ulimits
+
+
+def build_ulimits(ulimit_config):
+ if not ulimit_config:
+ return None
+ ulimits = []
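+    # A bare integer sets both the soft and the hard limit; a dict may
+    # specify them separately.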
+ for limit_name, soft_hard_values in six.iteritems(ulimit_config):
+ if isinstance(soft_hard_values, six.integer_types):
+ ulimits.append({'name': limit_name, 'soft': soft_hard_values, 'hard': soft_hard_values})
+ elif isinstance(soft_hard_values, dict):
+ ulimit_dict = {'name': limit_name}
+ ulimit_dict.update(soft_hard_values)
+ ulimits.append(ulimit_dict)
+
+ return ulimits
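+# Illustrative inputs/outputs for build_ulimits (assuming a compose-style
+# `ulimits` mapping):
+#   build_ulimits({'nofile': 1024})
+#       -> [{'name': 'nofile', 'soft': 1024, 'hard': 1024}]
+#   build_ulimits({'nofile': {'soft': 1024, 'hard': 2048}})
+#       -> [{'name': 'nofile', 'soft': 1024, 'hard': 2048}]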
+
+
+def get_log_config(logging_dict):
+ log_driver = logging_dict.get('driver', "") if logging_dict else ""
+ log_options = logging_dict.get('options', None) if logging_dict else None
+ return LogConfig(
+ type=log_driver,
+ config=log_options
+ )
+
+
+# TODO: remove once fix is available in docker-py
+def format_environment(environment):
+ def format_env(key, value):
+ if value is None:
+ return key
+ if isinstance(value, six.binary_type):
+ value = value.decode('utf-8')
+ return '{key}={value}'.format(key=key, value=value)
+ return [format_env(*item) for item in environment.items()]
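+# Illustrative: format_environment({'FOO': 'bar', 'EMPTY': None}) yields
+# ['FOO=bar', 'EMPTY'] in some order; a None value emits the bare key.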
+
+
+# Ports
+def formatted_ports(ports):
+ result = []
+ for port in ports:
+ if isinstance(port, ServicePort):
+ result.append(port.legacy_repr())
+ else:
+ result.append(port)
+ return result
+
+
+def build_container_ports(container_ports, options):
+ ports = []
+ all_ports = container_ports + options.get('expose', [])
+ for port_range in all_ports:
+ internal_range, _ = split_port(port_range)
+ for port in internal_range:
+ port = str(port)
+ if '/' in port:
+ port = tuple(port.split('/'))
+ ports.append(port)
+ return ports
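+# Illustrative: container_ports of ['3000-3001', '53/udp'] expands to
+# ['3000', '3001', ('53', 'udp')]; ranges are unrolled and a '/proto'
+# suffix becomes a (port, proto) tuple.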
+
+
+def convert_blkio_config(blkio_config):
+ result = {}
+ if blkio_config is None:
+ return result
+
+ result['weight'] = blkio_config.get('weight')
+ for field in [
+ "device_read_bps", "device_read_iops", "device_write_bps",
+ "device_write_iops", "weight_device",
+ ]:
+ if field not in blkio_config:
+ continue
+ arr = []
+ for item in blkio_config[field]:
+ arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
+ result[field] = arr
+ return result
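+
+
+# Illustrative: convert_blkio_config({'weight': 300,
+#     'device_read_bps': [{'path': '/dev/sda', 'rate': 400}]})
+# returns {'weight': 300, 'device_read_bps': [{'Path': '/dev/sda', 'Rate': 400}]};
+# the device entries are re-keyed with capitalized names for the Engine API.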
diff --git a/compose/state.py b/compose/state.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/compose/state.py
diff --git a/compose/timeparse.py b/compose/timeparse.py
new file mode 100644
index 00000000..16ef8a6d
--- /dev/null
+++ b/compose/timeparse.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+timeparse.py
+(c) Will Roberts <wildwilhelm@gmail.com> 1 February, 2014
+
+This is a vendored and modified copy of:
+github.com/wroberts/pytimeparse @ cc0550d
+
+It has been modified to mimic the behaviour of
+https://golang.org/pkg/time/#ParseDuration
+'''
+# MIT LICENSE
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import re
+
+HOURS = r'(?P<hours>[\d.]+)h'
+MINS = r'(?P<mins>[\d.]+)m'
+SECS = r'(?P<secs>[\d.]+)s'
+MILLI = r'(?P<milli>[\d.]+)ms'
+MICRO = r'(?P<micro>[\d.]+)(?:us|µs)'
+NANO = r'(?P<nano>[\d.]+)ns'
+
+
+def opt(x):
+ return r'(?:{x})?'.format(x=x)
+
+
+TIMEFORMAT = r'{HOURS}{MINS}{SECS}{MILLI}{MICRO}{NANO}'.format(
+ HOURS=opt(HOURS),
+ MINS=opt(MINS),
+ SECS=opt(SECS),
+ MILLI=opt(MILLI),
+ MICRO=opt(MICRO),
+ NANO=opt(NANO),
+)
+
+MULTIPLIERS = dict([
+ ('hours', 60 * 60),
+ ('mins', 60),
+ ('secs', 1),
+ ('milli', 1.0 / 1000),
+ ('micro', 1.0 / 1000.0 / 1000),
+ ('nano', 1.0 / 1000.0 / 1000.0 / 1000.0),
+])
+
+
+def timeparse(sval):
+ """Parse a time expression, returning it as a number of seconds. If
+ possible, the return value will be an `int`; if this is not
+ possible, the return value will be a `float`. Returns `None` if a time
+ expression cannot be parsed from the given string.
+
+ Arguments:
+ - `sval`: the string value to parse
+
+ >>> timeparse('1m24s')
+ 84
+ >>> timeparse('1.2m')
+ 72.0
+ >>> timeparse('1.2s')
+ 1.2
+ """
+ match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
+ if not match or not match.group(0).strip():
+ return
+
+ mdict = match.groupdict()
+ return sum(
+ MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None)
+
+
+def cast(value):
+ return int(value, 10) if value.isdigit() else float(value)
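+
+
+# Illustrative behaviour of the Go-style grammar above:
+#   timeparse('1h30m')  -> 5400
+#   timeparse('500ms')  -> 0.5
+#   timeparse('2 days') -> None  (units outside the grammar are rejected)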
diff --git a/compose/utils.py b/compose/utils.py
new file mode 100644
index 00000000..197ae6eb
--- /dev/null
+++ b/compose/utils.py
@@ -0,0 +1,145 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import codecs
+import hashlib
+import json
+import json.decoder
+import logging
+import ntpath
+
+import six
+from docker.errors import DockerException
+from docker.utils import parse_bytes as sdk_parse_bytes
+
+from .errors import StreamParseError
+from .timeparse import MULTIPLIERS
+from .timeparse import timeparse
+
+
+json_decoder = json.JSONDecoder()
+log = logging.getLogger(__name__)
+
+
+def get_output_stream(stream):
+ if six.PY3:
+ return stream
+ return codecs.getwriter('utf-8')(stream)
+
+
+def stream_as_text(stream):
+ """Given a stream of bytes or text, if any of the items in the stream
+ are bytes convert them to text.
+
+ This function can be removed once docker-py returns text streams instead
+ of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, six.text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(six.text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = six.text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ log.error(
+ 'Compose tried decoding the following data chunk, but failed:'
+ '\n%s' % repr(buffered)
+ )
+ raise StreamParseError(e)
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
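+# Illustrative: a byte stream of concatenated JSON objects is decoded
+# incrementally, regardless of how the chunks were buffered:
+#   events = json_stream(iter([b'{"status": "Pulling"}\n{"sta', b'tus": "Done"}\n']))
+#   [e['status'] for e in events]  -> ['Pulling', 'Done']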
+
+
+def json_hash(obj):
+ dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
+ h = hashlib.sha256()
+ h.update(dump.encode('utf8'))
+ return h.hexdigest()
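+# Key order is normalized via sort_keys, so two logically equal mappings
+# produce the same hash (Compose relies on this for service config hashes).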
+
+
+def microseconds_from_time_nano(time_nano):
+ return int(time_nano % 1000000000 / 1000)
+
+
+def nanoseconds_from_time_seconds(time_seconds):
+ return int(time_seconds / MULTIPLIERS['nano'])
+
+
+def parse_seconds_float(value):
+ return timeparse(value or '')
+
+
+def parse_nanoseconds_int(value):
+ parsed = timeparse(value or '')
+ if parsed is None:
+ return None
+ return nanoseconds_from_time_seconds(parsed)
+
+
+def build_string_dict(source_dict):
+ return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
+
+
+def splitdrive(path):
+ if len(path) == 0:
+ return ('', '')
+ if path[0] in ['.', '\\', '/', '~']:
+ return ('', path)
+ return ntpath.splitdrive(path)
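+# Illustrative: splitdrive('C:\\docker') -> ('C:', '\\docker'); relative or
+# POSIX-style paths such as './data' come back as ('', './data') untouched.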
+
+
+def parse_bytes(n):
+ try:
+ return sdk_parse_bytes(n)
+ except DockerException:
+ return None
diff --git a/compose/version.py b/compose/version.py
new file mode 100644
index 00000000..0532e16c
--- /dev/null
+++ b/compose/version.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from distutils.version import LooseVersion
+
+
+class ComposeVersion(LooseVersion):
+ """ A hashable version object """
+ def __hash__(self):
+ return hash(self.vstring)
diff --git a/compose/volume.py b/compose/volume.py
new file mode 100644
index 00000000..da8ba25c
--- /dev/null
+++ b/compose/volume.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+from docker.errors import NotFound
+from docker.utils import version_lt
+
+from .config import ConfigurationError
+from .const import LABEL_PROJECT
+from .const import LABEL_VOLUME
+
+log = logging.getLogger(__name__)
+
+
+class Volume(object):
+ def __init__(self, client, project, name, driver=None, driver_opts=None,
+ external=False, labels=None, custom_name=False):
+ self.client = client
+ self.project = project
+ self.name = name
+ self.driver = driver
+ self.driver_opts = driver_opts
+ self.external = external
+ self.labels = labels
+ self.custom_name = custom_name
+
+ def create(self):
+ return self.client.create_volume(
+ self.full_name, self.driver, self.driver_opts, labels=self._labels
+ )
+
+ def remove(self):
+ if self.external:
+ log.info("Volume %s is external, skipping", self.full_name)
+ return
+ log.info("Removing volume %s", self.full_name)
+ return self.client.remove_volume(self.full_name)
+
+ def inspect(self):
+ return self.client.inspect_volume(self.full_name)
+
+ def exists(self):
+ try:
+ self.inspect()
+ except NotFound:
+ return False
+ return True
+
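+ # Volumes are namespaced under the project name: assuming a project "myapp"
+ # and a volume "data", full_name is "myapp_data" unless the config supplied
+ # an explicit custom name.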
+ @property
+ def full_name(self):
+ if self.custom_name:
+ return self.name
+ return '{0}_{1}'.format(self.project, self.name)
+
+ @property
+ def _labels(self):
+ if version_lt(self.client._version, '1.23'):
+ return None
+ labels = self.labels.copy() if self.labels else {}
+ labels.update({
+ LABEL_PROJECT: self.project,
+ LABEL_VOLUME: self.name,
+ })
+ return labels
+
+
+class ProjectVolumes(object):
+
+ def __init__(self, volumes):
+ self.volumes = volumes
+
+ @classmethod
+ def from_config(cls, name, config_data, client):
+ config_volumes = config_data.volumes or {}
+ volumes = {
+ vol_name: Volume(
+ client=client,
+ project=name,
+ name=data.get('name', vol_name),
+ driver=data.get('driver'),
+ driver_opts=data.get('driver_opts'),
+ custom_name=data.get('name') is not None,
+ labels=data.get('labels'),
+ external=bool(data.get('external', False))
+ )
+ for vol_name, data in config_volumes.items()
+ }
+ return cls(volumes)
+
+ def remove(self):
+ for volume in self.volumes.values():
+ try:
+ volume.remove()
+ except NotFound:
+ log.warn("Volume %s not found.", volume.full_name)
+
+ def initialize(self):
+ try:
+ for volume in self.volumes.values():
+ volume_exists = volume.exists()
+ if volume.external:
+ log.debug(
+ 'Volume {0} declared as external. No new '
+ 'volume will be created.'.format(volume.name)
+ )
+ if not volume_exists:
+ raise ConfigurationError(
+ 'Volume {name} declared as external, but could'
+ ' not be found. Please create the volume manually'
+ ' using `{command}{name}` and try again.'.format(
+ name=volume.full_name,
+ command='docker volume create --name='
+ )
+ )
+ continue
+
+ if not volume_exists:
+ log.info(
+ 'Creating volume "{0}" with {1} driver'.format(
+ volume.full_name, volume.driver or 'default'
+ )
+ )
+ volume.create()
+ else:
+ driver = volume.inspect()['Driver']
+ if volume.driver is not None and driver != volume.driver:
+ raise ConfigurationError(
+ 'Configuration for volume {0} specifies driver '
+ '{1}, but a volume with the same name uses a '
+ 'different driver ({3}). If you wish to use the '
+ 'new configuration, please remove the existing '
+ 'volume "{2}" first:\n'
+ '$ docker volume rm {2}'.format(
+ volume.name, volume.driver, volume.full_name,
+ driver
+ )
+ )
+ except NotFound:
+ raise ConfigurationError(
+ 'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
+ )
+
+ def namespace_spec(self, volume_spec):
+ if not volume_spec.is_named_volume:
+ return volume_spec
+
+ volume = self.volumes[volume_spec.external]
+ return volume_spec._replace(external=volume.full_name)
diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose
new file mode 100644
index 00000000..1fdb2770
--- /dev/null
+++ b/contrib/completion/bash/docker-compose
@@ -0,0 +1,629 @@
+#!/bin/bash
+#
+# bash completion for docker-compose
+#
+# This work is based on the completion for the docker command.
+#
+# This script provides completion of:
+# - commands and their options
+# - service names
+# - filepaths
+#
+# To enable the completions either:
+# - place this file in /etc/bash_completion.d
+# or
+# - copy this file to e.g. ~/.docker-compose-completion.sh and add the line
+# below to your .bashrc after bash completion features are loaded
+# . ~/.docker-compose-completion.sh
+
+
+__docker_compose_q() {
+ docker-compose 2>/dev/null "${top_level_options[@]}" "$@"
+}
+
+# Transforms a multiline list of strings into a single line string
+# with the words separated by "|".
+__docker_compose_to_alternatives() {
+ local parts=( $1 )
+ local IFS='|'
+ echo "${parts[*]}"
+}
+
+# Transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
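+# For example, the list "--help\n-h" becomes the pattern "@(--help|-h)".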
+__docker_compose_to_extglob() {
+ local extglob=$( __docker_compose_to_alternatives "$1" )
+ echo "@($extglob)"
+}
+
+# Determines whether the option passed as the first argument exist on
+# the commandline. The option may be a pattern, e.g. `--force|-f`.
+__docker_compose_has_option() {
+ local pattern="$1"
+ for (( i=2; i < $cword; ++i)); do
+ if [[ ${words[$i]} =~ ^($pattern)$ ]] ; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+# suppress trailing whitespace
+__docker_compose_nospace() {
+ # compopt is not available in ancient bash versions
+ type compopt &>/dev/null && compopt -o nospace
+}
+
+# Extracts all service names from the compose file.
+___docker_compose_all_services_in_compose_file() {
+ __docker_compose_q config --services
+}
+
+# All services, even those without an existing container
+__docker_compose_services_all() {
+ COMPREPLY=( $(compgen -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
+}
+
+# All services that have an entry with the given key in their compose file section
+___docker_compose_services_with_key() {
+ # flatten sections under "services" to one line, then filter lines containing the key and return section name
+ __docker_compose_q config \
+ | sed -n -e '/^services:/,/^[^ ]/p' \
+ | sed -n 's/^ //p' \
+ | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
+ | awk -F: -v key=": +$1:" '$0 ~ key {print $1}'
+}
+
+# All services that are defined by a Dockerfile reference
+__docker_compose_services_from_build() {
+ COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key build)" -- "$cur") )
+}
+
+# All services that are defined by an image
+__docker_compose_services_from_image() {
+ COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key image)" -- "$cur") )
+}
+
+# The services for which containers have been created, optionally filtered
+# by a boolean expression passed in as argument.
+__docker_compose_services_with() {
+ local containers names
+ containers="$(__docker_compose_q ps -q)"
+ names=$(docker 2>/dev/null inspect -f "{{if ${1:-true}}}{{range \$k, \$v := .Config.Labels}}{{if eq \$k \"com.docker.compose.service\"}}{{\$v}}{{end}}{{end}}{{end}}" $containers)
+ COMPREPLY=( $(compgen -W "$names" -- "$cur") )
+}
+
+# The services for which at least one paused container exists
+__docker_compose_services_paused() {
+ __docker_compose_services_with '.State.Paused'
+}
+
+# The services for which at least one running container exists
+__docker_compose_services_running() {
+ __docker_compose_services_with '.State.Running'
+}
+
+# The services for which at least one stopped container exists
+__docker_compose_services_stopped() {
+ __docker_compose_services_with 'not .State.Running'
+}
+
+
+_docker_compose_build() {
+ case "$prev" in
+ --build-arg)
+ COMPREPLY=( $( compgen -e -- "$cur" ) )
+ __docker_compose_nospace
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --no-cache --pull" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_from_build
+ ;;
+ esac
+}
+
+
+_docker_compose_bundle() {
+ case "$prev" in
+ --output|-o)
+ _filedir
+ return
+ ;;
+ esac
+
+ COMPREPLY=( $( compgen -W "--push-images --help --output -o" -- "$cur" ) )
+}
+
+
+_docker_compose_config() {
+ COMPREPLY=( $( compgen -W "--help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
+}
+
+
+_docker_compose_create() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--build --force-recreate --help --no-build --no-recreate" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_docker_compose() {
+ case "$prev" in
+ --tlscacert|--tlscert|--tlskey)
+ _filedir
+ return
+ ;;
+ --file|-f)
+ _filedir "y?(a)ml"
+ return
+ ;;
+ --project-directory)
+ _filedir -d
+ return
+ ;;
+ $(__docker_compose_to_extglob "$top_level_options_with_args") )
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "$top_level_boolean_options $top_level_options_with_args --help -h --no-ansi --verbose --version -v" -- "$cur" ) )
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
+ ;;
+ esac
+}
+
+
+_docker_compose_down() {
+ case "$prev" in
+ --rmi)
+ COMPREPLY=( $( compgen -W "all local" -- "$cur" ) )
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --rmi --volumes -v --remove-orphans" -- "$cur" ) )
+ ;;
+ esac
+}
+
+
+_docker_compose_events() {
+ case "$prev" in
+ --json)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --json" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_exec() {
+ case "$prev" in
+ --index|--user|-u)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "-d --help --index --privileged -T --user -u" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
+_docker_compose_help() {
+ COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
+}
+
+_docker_compose_images() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+_docker_compose_kill() {
+ case "$prev" in
+ -s)
+ COMPREPLY=( $( compgen -W "SIGHUP SIGINT SIGKILL SIGUSR1 SIGUSR2" -- "$(echo $cur | tr '[:lower:]' '[:upper:]')" ) )
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help -s" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
+_docker_compose_logs() {
+ case "$prev" in
+ --tail)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--follow -f --help --no-color --tail --timestamps -t" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_pause() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
+_docker_compose_port() {
+ case "$prev" in
+ --protocol)
+ COMPREPLY=( $( compgen -W "tcp udp" -- "$cur" ) )
+ return;
+ ;;
+ --index)
+ return;
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --index --protocol" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_ps() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_pull() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --parallel --quiet" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_from_image
+ ;;
+ esac
+}
+
+
+_docker_compose_push() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_restart() {
+ case "$prev" in
+ --timeout|-t)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
+_docker_compose_rm() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--force -f --help --stop -s -v" -- "$cur" ) )
+ ;;
+ *)
+ if __docker_compose_has_option "--stop|-s" ; then
+ __docker_compose_services_all
+ else
+ __docker_compose_services_stopped
+ fi
+ ;;
+ esac
+}
+
+
+_docker_compose_run() {
+ case "$prev" in
+ -e)
+ COMPREPLY=( $( compgen -e -- "$cur" ) )
+ __docker_compose_nospace
+ return
+ ;;
+ --entrypoint|--name|--user|-u|--volume|-v|--workdir|-w)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u --volume -v --workdir -w" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_scale() {
+ case "$prev" in
+ =)
+ COMPREPLY=("$cur")
+ return
+ ;;
+ --timeout|-t)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
+ ;;
+ *)
+ COMPREPLY=( $(compgen -S "=" -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
+ __docker_compose_nospace
+ ;;
+ esac
+}
+
+
+_docker_compose_start() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_stopped
+ ;;
+ esac
+}
+
+
+_docker_compose_stop() {
+ case "$prev" in
+ --timeout|-t)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
+_docker_compose_top() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
+_docker_compose_unpause() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_paused
+ ;;
+ esac
+}
+
+
+_docker_compose_up() {
+ case "$prev" in
+ =)
+ COMPREPLY=("$cur")
+ return
+ ;;
+ --exit-code-from)
+ __docker_compose_services_all
+ return
+ ;;
+ --scale)
+ COMPREPLY=( $(compgen -S "=" -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
+ __docker_compose_nospace
+ return
+ ;;
+ --timeout|-t)
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--abort-on-container-exit --build -d --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --remove-orphans --scale --timeout -t" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
+
+
+_docker_compose_version() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--short" -- "$cur" ) )
+ ;;
+ esac
+}
+
+
+_docker_compose() {
+ local previous_extglob_setting=$(shopt -p extglob)
+ shopt -s extglob
+
+ local commands=(
+ build
+ bundle
+ config
+ create
+ down
+ events
+ exec
+ help
+ images
+ kill
+ logs
+ pause
+ port
+ ps
+ pull
+ push
+ restart
+ rm
+ run
+ scale
+ start
+ stop
+ top
+ unpause
+ up
+ version
+ )
+
+ # Top-level options that have to be passed on to the secondary docker-compose
+ # calls made by this script.
+ # Other global options that are not relevant for secondary calls are defined in
+ # `_docker_compose_docker_compose`.
+ local top_level_boolean_options="
+ --skip-hostname-check
+ --tls
+ --tlsverify
+ "
+ local top_level_options_with_args="
+ --file -f
+ --host -H
+ --project-directory
+ --project-name -p
+ --tlscacert
+ --tlscert
+ --tlskey
+ "
+
+ COMPREPLY=()
+ local cur prev words cword
+ _get_comp_words_by_ref -n : cur prev words cword
+
+ # search subcommand and invoke its handler.
+ # special treatment of some top-level options
+ local command='docker_compose'
+ local top_level_options=()
+ local counter=1
+
+ while [ $counter -lt $cword ]; do
+ case "${words[$counter]}" in
+ $(__docker_compose_to_extglob "$top_level_boolean_options") )
+ local opt=${words[counter]}
+ top_level_options+=($opt)
+ ;;
+ $(__docker_compose_to_extglob "$top_level_options_with_args") )
+ local opt=${words[counter]}
+ local arg=${words[++counter]}
+ top_level_options+=($opt $arg)
+ ;;
+ -*)
+ ;;
+ *)
+ command="${words[$counter]}"
+ break
+ ;;
+ esac
+ (( counter++ ))
+ done
+
+ local completions_func=_docker_compose_${command//-/_}
+ declare -F $completions_func >/dev/null && $completions_func
+
+ eval "$previous_extglob_setting"
+ return 0
+}
+
+complete -F _docker_compose docker-compose docker-compose.exe
diff --git a/contrib/completion/fish/docker-compose.fish b/contrib/completion/fish/docker-compose.fish
new file mode 100644
index 00000000..69ecc505
--- /dev/null
+++ b/contrib/completion/fish/docker-compose.fish
@@ -0,0 +1,24 @@
+# Tab completion for docker-compose (https://github.com/docker/compose).
+# Version: 1.9.0
+
+complete -e -c docker-compose
+
+for line in (docker-compose --help | \
+ string match -r '^\s+\w+\s+[^\n]+' | \
+ string trim)
+ set -l doc (string split -m 1 ' ' -- $line)
+ complete -c docker-compose -n '__fish_use_subcommand' -xa $doc[1] --description $doc[2]
+end
+
+complete -c docker-compose -s f -l file -r -d 'Specify an alternate compose file'
+complete -c docker-compose -s p -l project-name -x -d 'Specify an alternate project name'
+complete -c docker-compose -l verbose -d 'Show more output'
+complete -c docker-compose -s H -l host -x -d 'Daemon socket to connect to'
+complete -c docker-compose -l tls -d 'Use TLS; implied by --tlsverify'
+complete -c docker-compose -l tlscacert -r -d 'Trust certs signed only by this CA'
+complete -c docker-compose -l tlscert -r -d 'Path to TLS certificate file'
+complete -c docker-compose -l tlskey -r -d 'Path to TLS key file'
+complete -c docker-compose -l tlsverify -d 'Use TLS and verify the remote'
+complete -c docker-compose -l skip-hostname-check -d "Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)"
+complete -c docker-compose -s h -l help -d 'Print usage'
+complete -c docker-compose -s v -l version -d 'Print version and exit'
diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose
new file mode 100644
index 00000000..f53f9633
--- /dev/null
+++ b/contrib/completion/zsh/_docker-compose
@@ -0,0 +1,474 @@
+#compdef docker-compose
+
+# Description
+# -----------
+# zsh completion for docker-compose
+# -------------------------------------------------------------------------
+# Authors
+# -------
+# * Steve Durrheimer <s.durrheimer@gmail.com>
+# -------------------------------------------------------------------------
+# Inspiration
+# -----------
+# * @albers docker-compose bash completion script
+# * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion
+# -------------------------------------------------------------------------
+
+__docker-compose_q() {
+ docker-compose 2>/dev/null $compose_options "$@"
+}
+
+# All services defined in docker-compose.yml
+__docker-compose_all_services_in_compose_file() {
+ local already_selected
+ local -a services
+ already_selected=$(echo $words | tr " " "|")
+ __docker-compose_q config --services \
+ | grep -Ev "^(${already_selected})$"
+}
+
+# All services, even those without an existing container
+__docker-compose_services_all() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ services=$(__docker-compose_all_services_in_compose_file)
+ _alternative "args:services:($services)" && ret=0
+
+ return ret
+}
+
+# All services that have an entry with the given key in their docker-compose.yml section
+__docker-compose_services_with_key() {
+ local already_selected
+ local -a buildable
+ already_selected=$(echo $words | tr " " "|")
+ # flatten sections to one line, then filter lines containing the key and return section name.
+ __docker-compose_q config \
+ | sed -n -e '/^services:/,/^[^ ]/p' \
+ | sed -n 's/^ //p' \
+ | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
+ | grep " \+$1:" \
+ | cut -d: -f1 \
+ | grep -Ev "^(${already_selected})$"
+}
+
+# All services that are defined by a Dockerfile reference
+__docker-compose_services_from_build() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ buildable=$(__docker-compose_services_with_key build)
+ _alternative "args:buildable services:($buildable)" && ret=0
+
+ return ret
+}
+
+# All services that are defined by an image
+__docker-compose_services_from_image() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ pullable=$(__docker-compose_services_with_key image)
+ _alternative "args:pullable services:($pullable)" && ret=0
+
+ return ret
+}
+
+__docker-compose_get_services() {
+ [[ $PREFIX = -* ]] && return 1
+ integer ret=1
+ local kind
+ declare -a running paused stopped lines args services
+
+ if ! docker ps >/dev/null 2>&1; then
+ _message "Error! Docker is not running."
+ return 1
+ fi
+
+ kind=$1
+ shift
+ [[ $kind =~ (stopped|all) ]] && args=($args -a)
+
+ lines=(${(f)"$(_call_program commands docker $docker_options ps $args)"})
+ services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
+
+ # Parse header line to find columns
+ local i=1 j=1 k header=${lines[1]}
+ declare -A begin end
+ while (( j < ${#header} - 1 )); do
+ i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+ j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
+ k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
+ begin[${header[$i,$((j-1))]}]=$i
+ end[${header[$i,$((j-1))]}]=$k
+ done
+ lines=(${lines[2,-1]})
+
+ # Container ID
+ local line s name
+ local -a names
+ for line in $lines; do
+ if [[ ${services[@]} == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then
+ names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}})
+ for name in $names; do
+ s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
+ s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"
+ s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
+ if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
+ stopped=($stopped $s)
+ else
+ if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = *\(Paused\)* ]]; then
+ paused=($paused $s)
+ fi
+ running=($running $s)
+ fi
+ done
+ fi
+ done
+
+ [[ $kind =~ (running|all) ]] && _describe -t services-running "running services" running "$@" && ret=0
+ [[ $kind =~ (paused|all) ]] && _describe -t services-paused "paused services" paused "$@" && ret=0
+ [[ $kind =~ (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped "$@" && ret=0
+
+ return ret
+}
+
+__docker-compose_pausedservices() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker-compose_get_services paused "$@"
+}
+
+__docker-compose_stoppedservices() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker-compose_get_services stopped "$@"
+}
+
+__docker-compose_runningservices() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker-compose_get_services running "$@"
+}
+
+__docker-compose_services() {
+ [[ $PREFIX = -* ]] && return 1
+ __docker-compose_get_services all "$@"
+}
+
+__docker-compose_caching_policy() {
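+ # Glob qualifiers: (N) no error if nothing matches, (mh+1) modified more
+ # than one hour ago; a non-empty match means the cached list has expired.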
+ oldp=( "$1"(Nmh+1) ) # 1 hour
+ (( $#oldp ))
+}
+
+__docker-compose_commands() {
+ local cache_policy
+
+ zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
+ if [[ -z "$cache_policy" ]]; then
+ zstyle ":completion:${curcontext}:" cache-policy __docker-compose_caching_policy
+ fi
+
+ if ( [[ ${+_docker_compose_subcommands} -eq 0 ]] || _cache_invalid docker_compose_subcommands) \
+ && ! _retrieve_cache docker_compose_subcommands;
+ then
+ local -a lines
+ lines=(${(f)"$(_call_program commands docker-compose 2>&1)"})
+ _docker_compose_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:})
+ (( $#_docker_compose_subcommands > 0 )) && _store_cache docker_compose_subcommands _docker_compose_subcommands
+ fi
+ _describe -t docker-compose-commands "docker-compose command" _docker_compose_subcommands
+}
+
+__docker-compose_subcommand() {
+ local opts_help opts_force_recreate opts_no_recreate opts_no_build opts_remove_orphans opts_timeout opts_no_color opts_no_deps
+
+ opts_help='(: -)--help[Print usage]'
+ opts_force_recreate="(--no-recreate)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]"
+ opts_no_recreate="(--force-recreate)--no-recreate[If containers already exist, don't recreate them. Incompatible with --force-recreate.]"
+ opts_no_build="(--build)--no-build[Don't build an image, even if it's missing.]"
+ opts_remove_orphans="--remove-orphans[Remove containers for services not defined in the Compose file]"
+ opts_timeout=('(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: ")
+ opts_no_color='--no-color[Produce monochrome output.]'
+ opts_no_deps="--no-deps[Don't start linked services.]"
+
+ integer ret=1
+
+ case "$words[1]" in
+ (build)
+ _arguments \
+ $opts_help \
+ "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
+ '--force-rm[Always remove intermediate containers.]' \
+ '--no-cache[Do not use cache when building the image.]' \
+ '--pull[Always attempt to pull a newer version of the image.]' \
+ '*:services:__docker-compose_services_from_build' && ret=0
+ ;;
+ (bundle)
+ _arguments \
+ $opts_help \
+ '--push-images[Automatically push images for any services which have a `build` option specified.]' \
+ '(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
+ ;;
+ (config)
+ _arguments \
+ $opts_help \
+ '(--quiet -q)'{--quiet,-q}"[Only validate the configuration, don't print anything.]" \
+ '--resolve-image-digests[Pin image tags to digests.]' \
+ '--services[Print the service names, one per line.]' \
+ '--volumes[Print the volume names, one per line.]' && ret=0
+ ;;
+ (create)
+ _arguments \
+ $opts_help \
+ $opts_force_recreate \
+ $opts_no_recreate \
+ $opts_no_build \
+ "(--no-build)--build[Build images before creating containers.]" \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
+ (down)
+ _arguments \
+ $opts_help \
+ "--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
+ '(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
+ $opts_remove_orphans && ret=0
+ ;;
+ (events)
+ _arguments \
+ $opts_help \
+ '--json[Output events as a stream of json objects]' \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
+ (exec)
+ _arguments \
+ $opts_help \
+ '-d[Detached mode: Run command in the background.]' \
+ '--privileged[Give extended privileges to the process.]' \
+ '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \
+ '-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
+ '--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
+ '(-):running services:__docker-compose_runningservices' \
+ '(-):command: _command_names -e' \
+ '*::arguments: _normal' && ret=0
+ ;;
+ (help)
+ _arguments ':subcommand:__docker-compose_commands' && ret=0
+ ;;
+ (images)
+ _arguments \
+ $opts_help \
+ '-q[Only display IDs]' \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
+ (kill)
+ _arguments \
+ $opts_help \
+ '-s[SIGNAL to send to the container. Default signal is SIGKILL.]:signal:_signals' \
+ '*:running services:__docker-compose_runningservices' && ret=0
+ ;;
+ (logs)
+ _arguments \
+ $opts_help \
+ '(-f --follow)'{-f,--follow}'[Follow log output]' \
+ $opts_no_color \
+ '--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
+ '(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
+ (pause)
+ _arguments \
+ $opts_help \
+ '*:running services:__docker-compose_runningservices' && ret=0
+ ;;
+ (port)
+ _arguments \
+ $opts_help \
+ '--protocol=[tcp or udp \[default: tcp\]]:protocol:(tcp udp)' \
+ '--index=[index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
+ '1:running services:__docker-compose_runningservices' \
+ '2:port:_ports' && ret=0
+ ;;
+ (ps)
+ _arguments \
+ $opts_help \
+ '-q[Only display IDs]' \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
+ (pull)
+ _arguments \
+ $opts_help \
+ '--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \
+ '*:services:__docker-compose_services_from_image' && ret=0
+ ;;
+ (push)
+ _arguments \
+ $opts_help \
+ '--ignore-push-failures[Push what it can and ignores images with push failures.]' \
+ '*:services:__docker-compose_services' && ret=0
+ ;;
+ (rm)
+ _arguments \
+ $opts_help \
+ '(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \
+ '-v[Remove any anonymous volumes attached to containers]' \
+ '*:stopped services:__docker-compose_stoppedservices' && ret=0
+ ;;
+ (run)
+ _arguments \
+ $opts_help \
+ $opts_no_deps \
+ '-d[Detached mode: Run container in the background, print new container name.]' \
+ '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+ '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
+ '--name=[Assign a name to the container]:name: ' \
+ '(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
+ '--rm[Remove container after run. Ignored in detached mode.]' \
+ "--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
+ '-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
+ '(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
+ '(-v --volume)*'{-v,--volume=}'[Bind mount a volume]:volume: ' \
+ '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
+ '(-):services:__docker-compose_services' \
+ '(-):command: _command_names -e' \
+ '*::arguments: _normal' && ret=0
+ ;;
+ (scale)
+ _arguments \
+ $opts_help \
+ $opts_timeout \
+ '*:running services:__docker-compose_runningservices' && ret=0
+ ;;
+ (start)
+ _arguments \
+ $opts_help \
+ '*:stopped services:__docker-compose_stoppedservices' && ret=0
+ ;;
+ (stop|restart)
+ _arguments \
+ $opts_help \
+ $opts_timeout \
+ '*:running services:__docker-compose_runningservices' && ret=0
+ ;;
+ (top)
+ _arguments \
+ $opts_help \
+ '*:running services:__docker-compose_runningservices' && ret=0
+ ;;
+ (unpause)
+ _arguments \
+ $opts_help \
+ '*:paused services:__docker-compose_pausedservices' && ret=0
+ ;;
+ (up)
+ _arguments \
+ $opts_help \
+ '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.]' \
+ $opts_no_color \
+ $opts_no_deps \
+ $opts_force_recreate \
+ $opts_no_recreate \
+ $opts_no_build \
+ "(--no-build)--build[Build images before starting containers.]" \
+ "(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
+ '(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
+ $opts_remove_orphans \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
+ (version)
+ _arguments \
+ $opts_help \
+ "--short[Shows only Compose's version number.]" && ret=0
+ ;;
+ (*)
+ _message 'Unknown sub command' && ret=1
+ ;;
+ esac
+
+ return ret
+}
+
+_docker-compose() {
+ # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
+ # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
+ if [[ $service != docker-compose ]]; then
+ _call_function - _$service
+ return
+ fi
+
+ local curcontext="$curcontext" state line
+ integer ret=1
+ typeset -A opt_args
+
+ local file_description
+
+ if [[ -n ${words[(r)-f]} || -n ${words[(r)--file]} ]] ; then
+ file_description="Specify an override docker-compose file (default: docker-compose.override.yml)"
+ else
+ file_description="Specify an alternate docker-compose file (default: docker-compose.yml)"
+ fi
+
+ _arguments -C \
+ '(- :)'{-h,--help}'[Get help]' \
+ '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
+ '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
+ '--verbose[Show more output]' \
+ '(- :)'{-v,--version}'[Print version and exit]' \
+ '(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
+ '--tls[Use TLS; implied by --tlsverify]' \
+ '--tlscacert=[Trust certs signed only by this CA]:ca path:' \
+ '--tlscert=[Path to TLS certificate file]:client cert path:' \
+ '--tlskey=[Path to TLS key file]:tls key path:' \
+ '--tlsverify[Use TLS and verify the remote]' \
+ "--skip-hostname-check[Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)]" \
+ '(-): :->command' \
+ '(-)*:: :->option-or-argument' && ret=0
+
+ local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
+
+ relevant_compose_flags=(
+ "--file" "-f"
+ "--host" "-H"
+ "--project-name" "-p"
+ "--tls"
+ "--tlscacert"
+ "--tlscert"
+ "--tlskey"
+ "--tlsverify"
+ "--skip-hostname-check"
+ )
+
+ relevant_docker_flags=(
+ "--host" "-H"
+ "--tls"
+ "--tlscacert"
+ "--tlscert"
+ "--tlskey"
+ "--tlsverify"
+ )
+
+ for k in "${(@k)opt_args}"; do
+ if [[ -n "${relevant_docker_flags[(r)$k]}" ]]; then
+ docker_options+=$k
+ if [[ -n "$opt_args[$k]" ]]; then
+ docker_options+=$opt_args[$k]
+ fi
+ fi
+ if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
+ compose_options+=$k
+ if [[ -n "$opt_args[$k]" ]]; then
+ compose_options+=$opt_args[$k]
+ fi
+ fi
+ done
+
+ case $state in
+ (command)
+ __docker-compose_commands && ret=0
+ ;;
+ (option-or-argument)
+ curcontext=${curcontext%:*:*}:docker-compose-$words[1]:
+ __docker-compose_subcommand && ret=0
+ ;;
+ esac
+
+ return ret
+}
+
+_docker-compose "$@"
diff --git a/contrib/migration/migrate-compose-file-v1-to-v2.py b/contrib/migration/migrate-compose-file-v1-to-v2.py
new file mode 100755
index 00000000..c1785b0d
--- /dev/null
+++ b/contrib/migration/migrate-compose-file-v1-to-v2.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+"""
+Migrate a Compose file from the V1 format in Compose 1.5 to the V2 format
+supported by Compose 1.6+
+"""
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import argparse
+import logging
+import sys
+
+import ruamel.yaml
+
+from compose.config.types import VolumeSpec
+
+
+log = logging.getLogger('migrate')
+
+
+def migrate(content):
+ data = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
+
+ service_names = data.keys()
+
+ for name, service in data.items():
+ warn_for_links(name, service)
+ warn_for_external_links(name, service)
+ rewrite_net(service, service_names)
+ rewrite_build(service)
+ rewrite_logging(service)
+ rewrite_volumes_from(service, service_names)
+
+ services = {name: data.pop(name) for name in data.keys()}
+
+ data['version'] = "2"
+ data['services'] = services
+ create_volumes_section(data)
+
+ return data
+
+
+def warn_for_links(name, service):
+ links = service.get('links')
+ if links:
+ example_service = links[0].partition(':')[0]
+ log.warn(
+ "Service {name} has links, which no longer create environment "
+ "variables such as {example_service_upper}_PORT. "
+ "If you are using those in your application code, you should "
+ "instead connect directly to the hostname, e.g. "
+ "'{example_service}'."
+ .format(name=name, example_service=example_service,
+ example_service_upper=example_service.upper()))
+
+
+def warn_for_external_links(name, service):
+ external_links = service.get('external_links')
+ if external_links:
+ log.warn(
+ "Service {name} has external_links: {ext}, which now work "
+ "slightly differently. In particular, two containers must be "
+ "connected to at least one network in common in order to "
+ "communicate, even if explicitly linked together.\n\n"
+ "Either connect the external container to your app's default "
+ "network, or connect both the external container and your "
+ "service's containers to a pre-existing network. See "
+ "https://docs.docker.com/compose/networking/ "
+ "for more on how to do this."
+ .format(name=name, ext=external_links))
+
+
+def rewrite_net(service, service_names):
+ if 'net' in service:
+ network_mode = service.pop('net')
+
+ # "container:<service name>" is now "service:<service name>"
+ if network_mode.startswith('container:'):
+ name = network_mode.partition(':')[2]
+ if name in service_names:
+ network_mode = 'service:{}'.format(name)
+
+ service['network_mode'] = network_mode
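+# Illustrative: a v1 entry `net: "container:web"` (where `web` is a service in
+# the same file) becomes `network_mode: "service:web"`; any other value is
+# carried over to `network_mode` unchanged.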
+
+
+def rewrite_build(service):
+ if 'dockerfile' in service:
+ service['build'] = {
+ 'context': service.pop('build'),
+ 'dockerfile': service.pop('dockerfile'),
+ }
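+# Illustrative: v1 keys `build: .` and `dockerfile: Dockerfile-dev` are merged
+# into the v2 mapping `build: {context: ., dockerfile: Dockerfile-dev}`.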
+
+
+def rewrite_logging(service):
+ if 'log_driver' in service:
+ service['logging'] = {'driver': service.pop('log_driver')}
+ if 'log_opt' in service:
+ service['logging']['options'] = service.pop('log_opt')
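+# Illustrative: `log_driver: syslog` plus `log_opt: {...}` become the v2 form
+# `logging: {driver: syslog, options: {...}}`.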
+
+
+def rewrite_volumes_from(service, service_names):
+ for idx, volume_from in enumerate(service.get('volumes_from', [])):
+ if volume_from.split(':', 1)[0] not in service_names:
+ service['volumes_from'][idx] = 'container:%s' % volume_from
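+# Illustrative: in `volumes_from: [db, nginx_data]`, `db` (a service defined in
+# this file) is kept as-is, while the unknown name is rewritten to
+# `container:nginx_data` to make the container reference explicit in v2.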
+
+
+def create_volumes_section(data):
+ named_volumes = get_named_volumes(data['services'])
+ if named_volumes:
+ log.warn(
+ "Named volumes ({names}) must be explicitly declared. Creating a "
+ "'volumes' section with declarations.\n\n"
+ "For backwards-compatibility, they've been declared as external. "
+ "If you don't mind the volume names being prefixed with the "
+ "project name, you can remove the 'external' option from each one."
+ .format(names=', '.join(list(named_volumes))))
+
+ data['volumes'] = named_volumes
+
+
+def get_named_volumes(services):
+ volume_specs = [
+ VolumeSpec.parse(volume)
+ for service in services.values()
+ for volume in service.get('volumes', [])
+ ]
+ names = {
+ spec.external
+ for spec in volume_specs
+ if spec.is_named_volume
+ }
+ return {name: {'external': True} for name in names}
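+# Illustrative: services using `mydata:/var/lib/data` and `./local:/tmp/x`
+# yield {'mydata': {'external': True}}; host paths are not named volumes.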
+
+
+def write(stream, new_format, indent, width):
+ ruamel.yaml.dump(
+ new_format,
+ stream,
+ Dumper=ruamel.yaml.RoundTripDumper,
+ indent=indent,
+ width=width)
+
+
+def parse_opts(args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("filename", help="Compose file filename.")
+ parser.add_argument("-i", "--in-place", action='store_true')
+ parser.add_argument(
+ "--indent", type=int, default=2,
+ help="Number of spaces used to indent the output yaml.")
+ parser.add_argument(
+ "--width", type=int, default=80,
+ help="Number of spaces used as the output width.")
+ return parser.parse_args()
+
+
+def main(args):
+ logging.basicConfig(format='\033[33m%(levelname)s:\033[37m %(message)s\033[0m\n')
+
+ opts = parse_opts(args)
+
+ with open(opts.filename, 'r') as fh:
+ new_format = migrate(fh.read())
+
+ if opts.in_place:
+ output = open(opts.filename, 'w')
+ else:
+ output = sys.stdout
+ write(output, new_format, opts.indent, opts.width)
+
+
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/docker-compose.spec b/docker-compose.spec
new file mode 100644
index 00000000..9c46421f
--- /dev/null
+++ b/docker-compose.spec
@@ -0,0 +1,81 @@
+# -*- mode: python -*-
+
+block_cipher = None
+
+a = Analysis(['bin/docker-compose'],
+ pathex=['.'],
+ hiddenimports=[],
+ hookspath=None,
+ runtime_hooks=None,
+ cipher=block_cipher)
+
+pyz = PYZ(a.pure, cipher=block_cipher)
+
+exe = EXE(pyz,
+ a.scripts,
+ a.binaries,
+ a.zipfiles,
+ a.datas,
+ [
+ (
+ 'compose/config/config_schema_v1.json',
+ 'compose/config/config_schema_v1.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v2.0.json',
+ 'compose/config/config_schema_v2.0.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v2.1.json',
+ 'compose/config/config_schema_v2.1.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v2.2.json',
+ 'compose/config/config_schema_v2.2.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v2.3.json',
+ 'compose/config/config_schema_v2.3.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.0.json',
+ 'compose/config/config_schema_v3.0.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.1.json',
+ 'compose/config/config_schema_v3.1.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.2.json',
+ 'compose/config/config_schema_v3.2.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.3.json',
+ 'compose/config/config_schema_v3.3.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.4.json',
+ 'compose/config/config_schema_v3.4.json',
+ 'DATA'
+ ),
+ (
+ 'compose/GITSHA',
+ 'compose/GITSHA',
+ 'DATA'
+ )
+ ],
+
+ name='docker-compose',
+ debug=False,
+ strip=None,
+ upx=True,
+ console=True)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..50c91d20
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,16 @@
+# The docs have been moved!
+
+The documentation for Compose has been merged into
+[the general documentation repo](https://github.com/docker/docker.github.io).
+
+The docs for Compose are now here:
+https://github.com/docker/docker.github.io/tree/master/compose
+
+Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose).
+
+If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space).
+
+PRs for typos, additional information, etc. for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention, and will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`.
+
+As always, the docs remain open-source and we appreciate your feedback and
+pull requests!
diff --git a/experimental/compose_swarm_networking.md b/experimental/compose_swarm_networking.md
new file mode 100644
index 00000000..905f52f8
--- /dev/null
+++ b/experimental/compose_swarm_networking.md
@@ -0,0 +1,5 @@
+# Experimental: Compose, Swarm and Multi-Host Networking
+
+Compose now supports multi-host networking as standard. Read more here:
+
+https://docs.docker.com/compose/networking
diff --git a/logo.png b/logo.png
new file mode 100644
index 00000000..9bc5eb2f
--- /dev/null
+++ b/logo.png
Binary files differ
diff --git a/project/ISSUE-TRIAGE.md b/project/ISSUE-TRIAGE.md
new file mode 100644
index 00000000..b89cdc24
--- /dev/null
+++ b/project/ISSUE-TRIAGE.md
@@ -0,0 +1,35 @@
+Triaging of issues
+------------------
+
+The docker-compose issue triage process follows
+https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md
+with the following additions or exceptions.
+
+
+### Classify the Issue
+
+The following labels are provided in addition to the standard labels:
+
+| Kind | Description |
+|--------------|-------------------------------------------------------------------|
+| kind/cleanup | A refactor or improvement that is related to quality not function |
+| kind/parity | A request for feature parity with docker cli |
+
+
+### Functional areas
+
+Most issues should fit into one of the following functional areas:
+
+| Area |
+|-----------------|
+| area/build |
+| area/cli |
+| area/config |
+| area/logs |
+| area/networking |
+| area/packaging |
+| area/run |
+| area/scale |
+| area/tests |
+| area/up |
+| area/volumes |
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
new file mode 100644
index 00000000..5b30545f
--- /dev/null
+++ b/project/RELEASE-PROCESS.md
@@ -0,0 +1,148 @@
+Building a Compose release
+==========================
+
+## Prerequisites
+
+The release scripts require the following tools installed on the host:
+
+* https://hub.github.com/
+* https://stedolan.github.io/jq/
+* http://pandoc.org/
+
+## To get started with a new release
+
+Create a branch, update version, and add release notes by running `make-branch`
+
+ ./script/release/make-branch $VERSION [$BASE_VERSION]
+
+`$BASE_VERSION` will default to master. Use the last version tag for a bug fix
+release.
+
+As part of this script you'll be asked to:
+
+1. Update the version in `compose/__init__.py` and `script/run/run.sh`.
+
+ If the next release will be an RC, append `-rcN`, e.g. `1.4.0-rc1`.
+
+2. Write release notes in `CHANGELOG.md`.
+
+ Almost every feature enhancement should be mentioned, with the most
+ visible/exciting ones first. Use descriptive sentences and give context
+ where appropriate.
+
+ Bug fixes are worth mentioning if it's likely that they've affected lots
+ of people, or if they were regressions in the previous version.
+
+ Improvements to the code are not worth mentioning.
+
+3. Create a new repository on [bintray](https://bintray.com/docker-compose).
+ The name has to match the name of the branch (e.g. `bump-1.9.0`) and the
+ type should be "Generic". Other fields can be left blank.
+
+4. Check that the `vnext-compose` branch on
+ [the docs repo](https://github.com/docker/docker.github.io/) has
+ documentation for all the new additions in the upcoming release, and create
+ a PR there for what needs to be amended.
+
+
+## When a PR is merged into master that we want in the release
+
+1. Check out the bump branch and run the cherry pick script
+
+ git checkout bump-$VERSION
+ ./script/release/cherry-pick-pr $PR_NUMBER
+
+2. When you are done cherry-picking branches move the bump version commit to HEAD
+
+ ./script/release/rebase-bump-commit
+ git push --force $USERNAME bump-$VERSION
+
+
+## To release a version (whether RC or stable)
+
+Check out the bump branch and run the `build-binaries` script
+
+ git checkout bump-$VERSION
+ ./script/release/build-binaries
+
+When prompted, build the non-Linux binaries and test them.
+
+1. Download the different platform binaries by running the following script:
+
+ `./script/release/download-binaries $VERSION`
+
+ The binaries for Linux, OSX and Windows will be downloaded into the `binaries-$VERSION` folder.
+
+2. Draft a release from the tag on GitHub (the `build-binaries` script will open the window for
+ you)
+
+ The tag will only be present on GitHub when you run the `push-release`
+ script in step 6, but you can pre-fill it at that point.
+
+3. Paste in installation instructions and release notes. Here's an example -
+ change the Compose version and Docker version as appropriate:
+
+ If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker for Mac and Windows](https://www.docker.com/products/docker)**.
+
+ Docker for Mac and Windows will automatically install the latest version of Docker Engine for you.
+
+ Alternatively, you can use the usual commands to install or upgrade Compose:
+
+ ```
+ curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+ chmod +x /usr/local/bin/docker-compose
+ ```
+
+ See the [install docs](https://docs.docker.com/compose/install/) for more install options and instructions.
+
+ ## Compose file format compatibility matrix
+
+ | Compose file format | Docker Engine |
+ | --- | --- |
+ | 3.3 | 17.06.0+ |
+ | 3.0 &ndash; 3.2 | 1.13.0+ |
+ | 2.3 | 17.06.0+ |
+ | 2.2 | 1.13.0+ |
+ | 2.1 | 1.12.0+ |
+ | 2.0 | 1.10.0+ |
+ | 1.0 | 1.9.1+ |
+
+ ## Changes
+
+ ...release notes go here...
+
+4. Attach the binaries and `script/run/run.sh`.
+
+6. Add "Thanks" with a list of contributors. The contributor list can be generated
+ by running `./script/release/contributors`.
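+
+   For example, for a hypothetical release cut after the 1.17.0 tag:
+
+        ./script/release/contributors 1.17.0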
+
+6. If everything looks good, it's time to push the release:
+
+        ./script/release/push-release
+
+7. Merge the bump PR.
+
+8. Publish the release on GitHub.
+
+9. Check that all the binaries download (following the install instructions) and run.
+
+10. Announce the release on the appropriate Slack channel(s).
+
+## If it’s a stable release (not an RC)
+
+1. Close the release’s milestone.
+
+## If it’s a minor release (1.x.0), rather than a patch release (1.x.y)
+
+1. Open a PR against `master` to:
+
+ - update `CHANGELOG.md` to bring it in line with `release`
+ - bump the version in `compose/__init__.py` to the *next* minor version number with `dev` appended. For example, if you just released `1.4.0`, update it to `1.5.0dev`.
+
+2. Get the PR merged.
+
+## Finally
+
+1. Celebrate, however you’d like.
diff --git a/requirements-build.txt b/requirements-build.txt
new file mode 100644
index 00000000..27f610ca
--- /dev/null
+++ b/requirements-build.txt
@@ -0,0 +1 @@
+pyinstaller==3.2.1
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 00000000..e06cad45
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,5 @@
+coverage==3.7.1
+flake8==3.5.0
+mock>=1.0.1
+pytest==2.7.2
+pytest-cov==2.1.0
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..beeaa285
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,22 @@
+backports.ssl-match-hostname==3.5.0.1; python_version < '3'
+cached-property==1.3.0
+certifi==2017.4.17
+chardet==3.0.4
+colorama==0.3.9; sys_platform == 'win32'
+docker==2.5.1
+docker-pycreds==0.2.1
+dockerpty==0.4.1
+docopt==0.6.2
+enum34==1.1.6; python_version < '3.4'
+functools32==3.2.3.post2; python_version < '3.2'
+idna==2.5
+ipaddress==1.0.18
+jsonschema==2.6.0
+pypiwin32==219; sys_platform == 'win32'
+PySocks==1.6.7
+PyYAML==3.12
+requests==2.11.1
+six==1.10.0
+texttable==0.9.1
+urllib3==1.21.1
+websocket-client==0.32.0
diff --git a/script/build/image b/script/build/image
new file mode 100755
index 00000000..a3198c99
--- /dev/null
+++ b/script/build/image
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e
+
+if [ -z "$1" ]; then
+ >&2 echo "First argument must be image tag."
+ exit 1
+fi
+
+TAG=$1
+
+VERSION="$(python setup.py --version)"
+
+./script/build/write-git-sha
+python setup.py sdist bdist_wheel
+./script/build/linux
+docker build -t docker/compose:$TAG -f Dockerfile.run .
diff --git a/script/build/linux b/script/build/linux
new file mode 100755
index 00000000..1a4cd4d9
--- /dev/null
+++ b/script/build/linux
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -ex
+
+./script/clean
+
+TAG="docker-compose"
+docker build -t "$TAG" . | tail -n 200
+docker run \
+ --rm --entrypoint="script/build/linux-entrypoint" \
+ -v $(pwd)/dist:/code/dist \
+ -v $(pwd)/.git:/code/.git \
+ "$TAG"
diff --git a/script/build/linux-entrypoint b/script/build/linux-entrypoint
new file mode 100755
index 00000000..bf515060
--- /dev/null
+++ b/script/build/linux-entrypoint
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -ex
+
+TARGET=dist/docker-compose-$(uname -s)-$(uname -m)
+VENV=/code/.tox/py27
+
+mkdir -p `pwd`/dist
+chmod 777 `pwd`/dist
+
+$VENV/bin/pip install -q -r requirements-build.txt
+./script/build/write-git-sha
+su -c "$VENV/bin/pyinstaller docker-compose.spec" user
+mv dist/docker-compose $TARGET
+$TARGET version
diff --git a/script/build/osx b/script/build/osx
new file mode 100755
index 00000000..3de34576
--- /dev/null
+++ b/script/build/osx
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -ex
+
+PATH="/usr/local/bin:$PATH"
+
+rm -rf venv
+
+virtualenv -p /usr/local/bin/python venv
+venv/bin/pip install -r requirements.txt
+venv/bin/pip install -r requirements-build.txt
+venv/bin/pip install --no-deps .
+./script/build/write-git-sha
+venv/bin/pyinstaller docker-compose.spec
+mv dist/docker-compose dist/docker-compose-Darwin-x86_64
+dist/docker-compose-Darwin-x86_64 version
diff --git a/script/build/test-image b/script/build/test-image
new file mode 100755
index 00000000..a2eb62cd
--- /dev/null
+++ b/script/build/test-image
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e
+
+if [ -z "$1" ]; then
+ >&2 echo "First argument must be image tag."
+ exit 1
+fi
+
+TAG=$1
+
+docker build -t docker-compose-tests:tmp .
+ctnr_id=$(docker create --entrypoint=tox docker-compose-tests:tmp)
+docker commit $ctnr_id docker/compose-tests:latest
+docker tag docker/compose-tests:latest docker/compose-tests:$TAG
+docker rm -f $ctnr_id
+docker rmi -f docker-compose-tests:tmp
diff --git a/script/build/windows.ps1 b/script/build/windows.ps1
new file mode 100644
index 00000000..db643274
--- /dev/null
+++ b/script/build/windows.ps1
@@ -0,0 +1,60 @@
+# Builds the Windows binary.
+#
+# From a fresh 64-bit Windows 10 install, prepare the system as follows:
+#
+# 1. Install Git:
+#
+# http://git-scm.com/download/win
+#
+# 2. Install Python 2.7.10:
+#
+# https://www.python.org/downloads/
+#
+# 3. Append ";C:\Python27;C:\Python27\Scripts" to the "Path" environment variable:
+#
+# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true
+#
+# 4. In Powershell, run the following commands:
+#
+# $ pip install virtualenv
+# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned
+#
+# 5. Clone the repository:
+#
+# $ git clone https://github.com/docker/compose.git
+# $ cd compose
+#
+# 6. Build the binary:
+#
+# .\script\build\windows.ps1
+
+$ErrorActionPreference = "Stop"
+
+# Remove virtualenv
+if (Test-Path venv) {
+ Remove-Item -Recurse -Force .\venv
+}
+
+# Remove .pyc files
+Get-ChildItem -Recurse -Include *.pyc | foreach ($_) { Remove-Item $_.FullName }
+
+# Create virtualenv
+virtualenv .\venv
+
+# pip and pyinstaller generate lots of warnings, so we need to ignore them
+$ErrorActionPreference = "Continue"
+
+# Install dependencies
+.\venv\Scripts\pip install pypiwin32==219
+.\venv\Scripts\pip install -r requirements.txt
+.\venv\Scripts\pip install --no-deps .
+.\venv\Scripts\pip install --allow-external pyinstaller -r requirements-build.txt
+
+git rev-parse --short HEAD | out-file -encoding ASCII compose\GITSHA
+
+# Build binary
+.\venv\Scripts\pyinstaller .\docker-compose.spec
+$ErrorActionPreference = "Stop"
+
+Move-Item -Force .\dist\docker-compose.exe .\dist\docker-compose-Windows-x86_64.exe
+.\dist\docker-compose-Windows-x86_64.exe --version
diff --git a/script/build/write-git-sha b/script/build/write-git-sha
new file mode 100755
index 00000000..d16743c6
--- /dev/null
+++ b/script/build/write-git-sha
@@ -0,0 +1,7 @@
+#!/bin/bash
+#
+# Write the current commit sha to the file GITSHA. This file is included in
+# packaging so that `docker-compose version` can include the git sha.
+#
+set -e
+git rev-parse --short HEAD > compose/GITSHA
diff --git a/script/ci b/script/ci
new file mode 100755
index 00000000..34bf9a4b
--- /dev/null
+++ b/script/ci
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# Backwards compatibility for jenkins
+#
+# TODO: remove this script after all current PRs and jenkins are updated with
+# the new script/test/ci change
+set -e
+exec script/test/ci
diff --git a/script/clean b/script/clean
new file mode 100755
index 00000000..fb7ba3be
--- /dev/null
+++ b/script/clean
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -e
+
+find . -type f -name '*.pyc' -delete
+find . -name .coverage.* -delete
+find . -name __pycache__ -delete
+rm -rf docs/_site build dist docker-compose.egg-info
diff --git a/script/release/build-binaries b/script/release/build-binaries
new file mode 100755
index 00000000..a39b186d
--- /dev/null
+++ b/script/release/build-binaries
@@ -0,0 +1,40 @@
+#!/bin/bash
+#
+# Build the release binaries
+#
+
+. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+function usage() {
+ >&2 cat << EOM
+Build binaries for the release.
+
+This script requires that 'git config branch.${BRANCH}.release' is set to the
+release version for the release branch.
+
+EOM
+ exit 1
+}
+
+BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+VERSION="$(git config "branch.${BRANCH}.release")" || usage
+REPO=docker/compose
+
+# Build the binaries
+script/clean
+script/build/linux
+
+echo "Building the container distribution"
+script/build/image $VERSION
+
+echo "Building the compose-tests image"
+script/build/test-image $VERSION
+
+echo "Create a github release"
+# TODO: script more of this https://developer.github.com/v3/repos/releases/
+browser https://github.com/$REPO/releases/new
+
+echo "Don't forget to download the osx and windows binaries from appveyor/bintray!"
+echo "https://dl.bintray.com/docker-compose/$BRANCH/"
+echo "https://ci.appveyor.com/project/docker/compose"
+echo
diff --git a/script/release/cherry-pick-pr b/script/release/cherry-pick-pr
new file mode 100755
index 00000000..f4a5a740
--- /dev/null
+++ b/script/release/cherry-pick-pr
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Cherry-pick a PR into the release branch
+#
+
+set -e
+set -o pipefail
+
+
+function usage() {
+ >&2 cat << EOM
+Cherry-pick commits from a github pull request.
+
+Usage:
+
+ $0 <github PR number>
+EOM
+ exit 1
+}
+
+[ -n "$1" ] || usage
+
+if [ -z "$(command -v hub 2> /dev/null)" ]; then
+ >&2 echo "$0 requires https://hub.github.com/."
+ >&2 echo "Please install it and make sure it is available on your \$PATH."
+ exit 2
+fi
+
+
+REPO=docker/compose
+GITHUB=https://github.com/$REPO/pull
+PR=$1
+url="$GITHUB/$PR"
+hub am -3 $url
diff --git a/script/release/contributors b/script/release/contributors
new file mode 100755
index 00000000..4657dd80
--- /dev/null
+++ b/script/release/contributors
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -e
+
+
+function usage() {
+ >&2 cat << EOM
+Print the list of github contributors for the release
+
+Usage:
+
+ $0 <previous release tag>
+EOM
+ exit 1
+}
+
+[[ -n "$1" ]] || usage
+PREV_RELEASE=$1
+BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+URL="https://api.github.com/repos/docker/compose/compare"
+
+contribs=$(curl -sf "$URL/$PREV_RELEASE...$BRANCH" | \
+ jq -r '.commits[].author.login' | \
+ sort | \
+ uniq -c | \
+ sort -nr)
+
+echo "Contributions by user: "
+echo "$contribs"
+echo
+echo "$contribs" | awk '{print "@"$2","}' | xargs
diff --git a/script/release/download-binaries b/script/release/download-binaries
new file mode 100755
index 00000000..5d01f5f7
--- /dev/null
+++ b/script/release/download-binaries
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+function usage() {
+ >&2 cat << EOM
+Download Linux, Mac OS and Windows binaries from remote endpoints
+
+Usage:
+
+ $0 <version>
+
+Options:
+
+ version version string for the release (ex: 1.6.0)
+
+EOM
+ exit 1
+}
+
+
+[ -n "$1" ] || usage
+VERSION=$1
+BASE_BINTRAY_URL=https://dl.bintray.com/docker-compose/bump-$VERSION/
+DESTINATION=binaries-$VERSION
+APPVEYOR_URL=https://ci.appveyor.com/api/projects/docker/compose/\
+artifacts/dist%2Fdocker-compose-Windows-x86_64.exe?branch=bump-$VERSION
+
+mkdir $DESTINATION
+
+
+wget -O $DESTINATION/docker-compose-Darwin-x86_64 $BASE_BINTRAY_URL/docker-compose-Darwin-x86_64
+wget -O $DESTINATION/docker-compose-Linux-x86_64 $BASE_BINTRAY_URL/docker-compose-Linux-x86_64
+wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
diff --git a/script/release/make-branch b/script/release/make-branch
new file mode 100755
index 00000000..b8a0cd31
--- /dev/null
+++ b/script/release/make-branch
@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+# Prepare a new release branch
+#
+
+. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+function usage() {
+ >&2 cat << EOM
+Create a new release branch 'release-<version>'
+
+Usage:
+
+ $0 <version> [<base_version>]
+
+Options:
+
+ version version string for the release (ex: 1.6.0)
+ base_version branch or tag to start from. Defaults to master. For
+                bug-fix releases use the previous stable release tag.
+
+EOM
+ exit 1
+}
+
+
+[ -n "$1" ] || usage
+VERSION=$1
+BRANCH=bump-$VERSION
+REPO=docker/compose
+GITHUB_REPO=git@github.com:$REPO
+
+if [ -z "$2" ]; then
+ BASE_VERSION="master"
+else
+ BASE_VERSION=$2
+fi
+
+
+DEFAULT_REMOTE=release
+REMOTE="$(find_remote "$GITHUB_REPO")"
+# If we don't have a docker remote add one
+if [ -z "$REMOTE" ]; then
+ echo "Creating $DEFAULT_REMOTE remote"
+ git remote add ${DEFAULT_REMOTE} ${GITHUB_REPO}
+fi
+
+# handle the difference between a branch and a tag
+if [ -z "$(git name-rev --tags $BASE_VERSION | grep tags)" ]; then
+ BASE_VERSION=$REMOTE/$BASE_VERSION
+fi
+
+echo "Creating a release branch $VERSION from $BASE_VERSION"
+read -n1 -r -p "Continue? (ctrl+c to cancel)"
+git fetch $REMOTE -p
+git checkout -b $BRANCH $BASE_VERSION
+
+echo "Merging remote release branch into new release branch"
+git merge --strategy=ours --no-edit $REMOTE/release
+
+# Store the release version for this branch in git, so that other release
+# scripts can use it
+git config "branch.${BRANCH}.release" $VERSION
+
+
+editor=${EDITOR:-vim}
+
+echo "Update versions in compose/__init__.py, script/run/run.sh"
+$editor compose/__init__.py
+$editor script/run/run.sh
+
+
+echo "Write release notes in CHANGELOG.md"
+browser "https://github.com/docker/compose/issues?q=milestone%3A$VERSION+is%3Aclosed"
+$editor CHANGELOG.md
+
+
+git diff
+echo "Verify changes before commit. Exit the shell to commit changes"
+$SHELL || true
+git commit -a -m "Bump $VERSION" --signoff --no-verify
+
+
+echo "Push branch to docker remote"
+git push $REMOTE
+browser https://github.com/$REPO/compare/docker:release...$BRANCH?expand=1
diff --git a/script/release/push-release b/script/release/push-release
new file mode 100755
index 00000000..0578aaff
--- /dev/null
+++ b/script/release/push-release
@@ -0,0 +1,82 @@
+#!/bin/bash
+#
+# Create the official release
+#
+
+. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+function usage() {
+ >&2 cat << EOM
+Publish a release by building all artifacts and pushing them.
+
+This script requires that 'git config branch.${BRANCH}.release' is set to the
+release version for the release branch.
+
+EOM
+ exit 1
+}
+
+BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+VERSION="$(git config "branch.${BRANCH}.release")" || usage
+
+if [ -z "$(command -v jq 2> /dev/null)" ]; then
+ >&2 echo "$0 requires https://stedolan.github.io/jq/"
+ >&2 echo "Please install it and make sure it is available on your \$PATH."
+ exit 2
+fi
+
+
+if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
+ >&2 echo "$0 requires http://pandoc.org/"
+ >&2 echo "Please install it and make sure it is available on your \$PATH."
+ exit 2
+fi
+
+API=https://api.github.com/repos
+REPO=docker/compose
+GITHUB_REPO=git@github.com:$REPO
+
+# Check the build status is green
+sha=$(git rev-parse HEAD)
+url=$API/$REPO/statuses/$sha
+build_status=$(curl -s $url | jq -r '.[0].state')
+if [ -n "$SKIP_BUILD_CHECK" ]; then
+ echo "Skipping build status check..."
+elif [[ "$build_status" != "success" ]]; then
+ >&2 echo "Build status is $build_status, but it should be success."
+ exit -1
+fi
+
+echo "Tagging the release as $VERSION"
+git tag $VERSION
+git push $GITHUB_REPO $VERSION
+
+echo "Uploading the docker image"
+docker push docker/compose:$VERSION
+
+echo "Uploading the compose-tests image"
+docker push docker/compose-tests:latest
+docker push docker/compose-tests:$VERSION
+
+echo "Uploading package to PyPI"
+pandoc -f markdown -t rst README.md -o README.rst
+sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
+./script/build/write-git-sha
+python setup.py sdist bdist_wheel
+if [ "$(command -v twine 2> /dev/null)" ]; then
+ twine upload ./dist/docker-compose-${VERSION/-/}.tar.gz ./dist/docker_compose-${VERSION/-/}-py2.py3-none-any.whl
+else
+ python setup.py upload
+fi
+
+echo "Testing pip package"
+deactivate || true
+virtualenv venv-test
+source venv-test/bin/activate
+pip install docker-compose==$VERSION
+docker-compose version
+deactivate
+rm -rf venv-test
+
+echo "Now publish the github release, and test the downloads."
+echo "Email maintainers@dockerproject.org and engineering@docker.com about the new release."
diff --git a/script/release/rebase-bump-commit b/script/release/rebase-bump-commit
new file mode 100755
index 00000000..3c2ae72b
--- /dev/null
+++ b/script/release/rebase-bump-commit
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# Move the "Bump <version>" commit to the HEAD of the branch
+#
+
+. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+function usage() {
+ >&2 cat << EOM
+Move the "Bump <version>" commit to the HEAD of the branch
+
+This script requires that 'git config branch.${BRANCH}.release' is set to the
+release version for the release branch.
+
+EOM
+ exit 1
+}
+
+
+BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+VERSION="$(git config "branch.${BRANCH}.release")" || usage
+
+
+COMMIT_MSG="Bump $VERSION"
+sha="$(git log --grep "$COMMIT_MSG\$" --format="%H")"
+if [ -z "$sha" ]; then
+ >&2 echo "No commit with message \"$COMMIT_MSG\""
+ exit 2
+fi
+if [[ "$sha" == "$(git rev-parse HEAD)" ]]; then
+ >&2 echo "Bump commit already at HEAD"
+ exit 0
+fi
+
+commits=$(git log --format="%H" "$sha..HEAD" | wc -l | xargs echo)
+
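+# Rewrite the branch without the bump commit, then re-apply it at HEAD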
+git rebase --onto $sha~1 HEAD~$commits $BRANCH
+git cherry-pick $sha
diff --git a/script/release/utils.sh b/script/release/utils.sh
new file mode 100644
index 00000000..321c1fb7
--- /dev/null
+++ b/script/release/utils.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Util functions for release scripts
+#
+
+set -e
+set -o pipefail
+
+
+function browser() {
+ local url=$1
+ xdg-open $url || open $url
+}
+
+
+function find_remote() {
+ local url=$1
+ for remote in $(git remote); do
+ git config --get remote.${remote}.url | grep $url > /dev/null && echo -n $remote
+ done
+    # Always return true; otherwise a non-matching final remote would make the function return false
+ true
+}
diff --git a/script/run/run.ps1 b/script/run/run.ps1
new file mode 100644
index 00000000..47ec5469
--- /dev/null
+++ b/script/run/run.ps1
@@ -0,0 +1,23 @@
+# Run docker-compose in a container via boot2docker.
+#
+# The current directory will be mirrored as a volume and additional
+# volumes (or any other options) can be mounted by using
+# $Env:DOCKER_COMPOSE_OPTIONS.
+
+if ($Env:DOCKER_COMPOSE_VERSION -eq $null -or $Env:DOCKER_COMPOSE_VERSION.Length -eq 0) {
+ $Env:DOCKER_COMPOSE_VERSION = "latest"
+}
+
+if ($Env:DOCKER_COMPOSE_OPTIONS -eq $null) {
+ $Env:DOCKER_COMPOSE_OPTIONS = ""
+}
+
+if (-not $Env:DOCKER_HOST) {
+ docker-machine env --shell=powershell default | Invoke-Expression
+ if (-not $?) { exit $LastExitCode }
+}
+
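+# Convert the Windows path to the form Docker expects, e.g. C:\Users\foo -> /c/Users/foo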
+$local="/$($PWD -replace '^(.):(.*)$', '"$1".ToLower()+"$2".Replace("\","/")' | Invoke-Expression)"
+docker run --rm -ti -v /var/run/docker.sock:/var/run/docker.sock -v "${local}:$local" -w "$local" $Env:DOCKER_COMPOSE_OPTIONS "docker/compose:$Env:DOCKER_COMPOSE_VERSION" $args
+exit $LastExitCode
diff --git a/script/run/run.sh b/script/run/run.sh
new file mode 100755
index 00000000..58483196
--- /dev/null
+++ b/script/run/run.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# Run docker-compose in a container
+#
+# This script will attempt to mirror the host paths by using volumes for the
+# following paths:
+# * $(pwd)
+# * $(dirname $COMPOSE_FILE) if it's set
+# * $HOME if it's set
+#
+# You can add additional volumes (or any docker run options) using
+# the $COMPOSE_OPTIONS environment variable.
+#
+
+
+set -e
+
+VERSION="1.17.1"
+IMAGE="docker/compose:$VERSION"
+
+
+# Setup options for connecting to docker host
+if [ -z "$DOCKER_HOST" ]; then
+ DOCKER_HOST="/var/run/docker.sock"
+fi
+if [ -S "$DOCKER_HOST" ]; then
+ DOCKER_ADDR="-v $DOCKER_HOST:$DOCKER_HOST -e DOCKER_HOST"
+else
+ DOCKER_ADDR="-e DOCKER_HOST -e DOCKER_TLS_VERIFY -e DOCKER_CERT_PATH"
+fi
+
+
+# Setup volume mounts for compose config and context
+if [ "$(pwd)" != '/' ]; then
+ VOLUMES="-v $(pwd):$(pwd)"
+fi
+if [ -n "$COMPOSE_FILE" ]; then
+ COMPOSE_OPTIONS="$COMPOSE_OPTIONS -e COMPOSE_FILE=$COMPOSE_FILE"
+ compose_dir=$(realpath $(dirname $COMPOSE_FILE))
+fi
+# TODO: also check --file argument
+if [ -n "$compose_dir" ]; then
+ VOLUMES="$VOLUMES -v $compose_dir:$compose_dir"
+fi
+if [ -n "$HOME" ]; then
+    VOLUMES="$VOLUMES -v $HOME:$HOME -v $HOME:/root" # also mount $HOME at /root to share the docker config (~/.docker)
+fi
+
+# Only allocate tty if we detect one
+if [ -t 1 ]; then
+ DOCKER_RUN_OPTIONS="-t"
+fi
+if [ -t 0 ]; then
+ DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
+fi
+
+exec docker run --rm $DOCKER_RUN_OPTIONS $DOCKER_ADDR $COMPOSE_OPTIONS $VOLUMES -w "$(pwd)" $IMAGE "$@"
diff --git a/script/setup/osx b/script/setup/osx
new file mode 100755
index 00000000..e0c2bd0a
--- /dev/null
+++ b/script/setup/osx
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -ex
+
+python_version() {
+ python -V 2>&1
+}
+
+openssl_version() {
+ python -c "import ssl; print ssl.OPENSSL_VERSION"
+}
+
+desired_python_version="2.7.12"
+desired_python_brew_version="2.7.12"
+python_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/737a2e34a89b213c1f0a2a24fc1a3c06635eed04/Formula/python.rb"
+
+desired_openssl_version="1.0.2j"
+desired_openssl_brew_version="1.0.2j"
+openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/30d3766453347f6e22b3ed6c74bb926d6def2eb5/Formula/openssl.rb"
+
+PATH="/usr/local/bin:$PATH"
+
+if !(which brew); then
+ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+fi
+
+brew update > /dev/null
+
+if !(python_version | grep "$desired_python_version"); then
+ if brew list | grep python; then
+ brew unlink python
+ fi
+
+ brew install "$python_formula"
+ brew switch python "$desired_python_brew_version"
+fi
+
+if !(openssl_version | grep "$desired_openssl_version"); then
+ if brew list | grep openssl; then
+ brew unlink openssl
+ fi
+
+ brew install "$openssl_formula"
+ brew switch openssl "$desired_openssl_brew_version"
+fi
+
+echo "*** Using $(python_version)"
+echo "*** Using $(openssl_version)"
+
+if !(which virtualenv); then
+ pip install virtualenv
+fi
diff --git a/script/test/all b/script/test/all
new file mode 100755
index 00000000..1200c496
--- /dev/null
+++ b/script/test/all
@@ -0,0 +1,64 @@
+#!/bin/bash
+# This should be run inside a container built from the Dockerfile
+# at the root of the repo - script/test will do it automatically.
+
+set -e
+
+>&2 echo "Running lint checks"
+docker run --rm \
+ --tty \
+ ${GIT_VOLUME} \
+ --entrypoint="tox" \
+ "$TAG" -e pre-commit
+
+get_versions="docker run --rm
+ --entrypoint=/code/.tox/py27/bin/python
+ $TAG
+ /code/script/test/versions.py docker/docker-ce,moby/moby"
+
+if [ "$DOCKER_VERSIONS" == "" ]; then
+ DOCKER_VERSIONS="$($get_versions default)"
+elif [ "$DOCKER_VERSIONS" == "all" ]; then
+ DOCKER_VERSIONS=$($get_versions -n 2 recent)
+fi
+
+
+BUILD_NUMBER=${BUILD_NUMBER-$USER}
+PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py34}
+
+for version in $DOCKER_VERSIONS; do
+ >&2 echo "Running tests against Docker $version"
+
+ daemon_container="compose-dind-$version-$BUILD_NUMBER"
+
+ function on_exit() {
+ if [[ "$?" != "0" ]]; then
+ docker logs "$daemon_container" 2>&1 | tail -n 100
+ fi
+ docker rm -vf "$daemon_container"
+ }
+
+ trap "on_exit" EXIT
+
+ repo="dockerswarm/dind"
+
+ docker run \
+ -d \
+ --name "$daemon_container" \
+ --privileged \
+ --volume="/var/lib/docker" \
+ "$repo:$version" \
+ dockerd -H tcp://0.0.0.0:2375 $DOCKER_DAEMON_ARGS \
+ 2>&1 | tail -n 10
+
+ docker run \
+ --rm \
+ --tty \
+ --link="$daemon_container:docker" \
+ --env="DOCKER_HOST=tcp://docker:2375" \
+ --env="DOCKER_VERSION=$version" \
+ --entrypoint="tox" \
+ "$TAG" \
+ -e "$PY_TEST_VERSIONS" -- "$@"
+
+done
diff --git a/script/test/ci b/script/test/ci
new file mode 100755
index 00000000..c5927b2c
--- /dev/null
+++ b/script/test/ci
@@ -0,0 +1,25 @@
+#!/bin/bash
+# This should be run inside a container built from the Dockerfile
+# at the root of the repo:
+#
+# $ TAG="docker-compose:$(git rev-parse --short HEAD)"
+# $ docker build -t "$TAG" .
+# $ docker run --rm \
+# --volume="/var/run/docker.sock:/var/run/docker.sock" \
+# --volume="$(pwd)/.git:/code/.git" \
+# -e "TAG=$TAG" \
+# --entrypoint="script/test/ci" "$TAG"
+
+set -ex
+
+docker version
+
+export DOCKER_VERSIONS=all
+STORAGE_DRIVER=${STORAGE_DRIVER:-overlay}
+export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"
+
+GIT_VOLUME="--volumes-from=$(hostname)"
+. script/test/all
+
+>&2 echo "Building Linux binary"
+. script/build/linux-entrypoint
diff --git a/script/test/default b/script/test/default
new file mode 100755
index 00000000..aabb4e42
--- /dev/null
+++ b/script/test/default
@@ -0,0 +1,19 @@
+#!/bin/bash
+# See CONTRIBUTING.md for usage.
+
+set -ex
+
+TAG="docker-compose:$(git rev-parse --short HEAD)"
+
+# By default this uses the Dockerfile, but it can be overridden to use an
+# alternative file, e.g. DOCKERFILE=Dockerfile.armhf script/test/default
+DOCKERFILE="${DOCKERFILE:-Dockerfile}"
+
+rm -rf coverage-html
+# Create the host directory so it's owned by $USER
+mkdir -p coverage-html
+
+docker build -f ${DOCKERFILE} -t "$TAG" .
+
+GIT_VOLUME="--volume=$(pwd)/.git:/code/.git"
+. script/test/all
diff --git a/script/test/versions.py b/script/test/versions.py
new file mode 100755
index 00000000..46872ed9
--- /dev/null
+++ b/script/test/versions.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+"""
+Query the github API for the git tags of a project, and return a list of
+version tags for recent releases, or the default release.
+
+The default release is the most recent non-RC version.
+
+Recent is a list of unique major.minor versions, where each is the most
+recent version in the series.
+
+For example, if the list of versions is:
+
+ 1.8.0-rc2
+ 1.8.0-rc1
+ 1.7.1
+ 1.7.0
+ 1.7.0-rc1
+ 1.6.2
+ 1.6.1
+
+`default` would return `1.7.1` and
+`recent -n 3` would return `1.8.0-rc2 1.7.1 1.6.2`
+"""
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import argparse
+import itertools
+import operator
+import sys
+from collections import namedtuple
+
+import requests
+
+
+GITHUB_API = 'https://api.github.com/repos'
+
+
+class Version(namedtuple('_Version', 'major minor patch rc edition')):
+
+ @classmethod
+ def parse(cls, version):
+ edition = None
+ version = version.lstrip('v')
+ version, _, rc = version.partition('-')
+ if rc:
+ if 'rc' not in rc:
+ edition = rc
+ rc = None
+ elif '-' in rc:
+ edition, rc = rc.split('-')
+
+ major, minor, patch = version.split('.', 3)
+ return cls(major, minor, patch, rc, edition)
+
+ @property
+ def major_minor(self):
+ return self.major, self.minor
+
+ @property
+ def order(self):
+ """Return a representation that allows this object to be sorted
+ correctly with the default comparator.
+ """
+ # rc releases should appear before official releases
+ rc = (0, self.rc) if self.rc else (1, )
+ return (int(self.major), int(self.minor), int(self.patch)) + rc
+
+ def __str__(self):
+ rc = '-{}'.format(self.rc) if self.rc else ''
+ edition = '-{}'.format(self.edition) if self.edition else ''
+ return '.'.join(map(str, self[:3])) + edition + rc
+
+
+def group_versions(versions):
+ """Group versions by `major.minor` releases.
+
+ Example:
+
+ >>> group_versions([
+ Version(1, 0, 0),
+ Version(2, 0, 0, 'rc1'),
+ Version(2, 0, 0),
+ Version(2, 1, 0),
+ ])
+
+ [
+ [Version(1, 0, 0)],
+ [Version(2, 0, 0), Version(2, 0, 0, 'rc1')],
+ [Version(2, 1, 0)],
+ ]
+ """
+ return list(
+ list(releases)
+ for _, releases
+ in itertools.groupby(versions, operator.attrgetter('major_minor'))
+ )
+
+
+def get_latest_versions(versions, num=1):
+ """Return a list of the most recent versions for each major.minor version
+ group.
+ """
+ versions = group_versions(versions)
+ num = min(len(versions), num)
+ return [versions[index][0] for index in range(num)]
+
+
+def get_default(versions):
+ """Return a :class:`Version` for the latest non-rc version."""
+ for version in versions:
+ if not version.rc:
+ return version
+
+
+def get_versions(tags):
+ for tag in tags:
+ try:
+ yield Version.parse(tag['name'])
+ except ValueError:
+ print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
+
+
+def get_github_releases(projects):
+ """Query the Github API for a list of version tags and return them in
+ sorted order.
+
+ See https://developer.github.com/v3/repos/#list-tags
+ """
+ versions = []
+ for project in projects:
+ url = '{}/{}/tags'.format(GITHUB_API, project)
+ response = requests.get(url)
+ response.raise_for_status()
+ versions.extend(get_versions(response.json()))
+ return sorted(versions, reverse=True, key=operator.attrgetter('order'))
+
+
+def parse_args(argv):
+ parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('project', help="Github project name(s), comma-separated (ex: docker/docker)")
+ parser.add_argument('command', choices=['recent', 'default'])
+ parser.add_argument('-n', '--num', type=int, default=2,
+ help="Number of versions to return from `recent`")
+ return parser.parse_args(argv)
+
+
+def main(argv=None):
+ args = parse_args(argv)
+ versions = get_github_releases(args.project.split(','))
+
+ if args.command == 'recent':
+ print(' '.join(map(str, get_latest_versions(versions, args.num))))
+ elif args.command == 'default':
+ print(get_default(versions))
+ else:
+ raise ValueError("Unknown command {}".format(args.command))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/script/travis/bintray.json.tmpl b/script/travis/bintray.json.tmpl
new file mode 100644
index 00000000..f9728558
--- /dev/null
+++ b/script/travis/bintray.json.tmpl
@@ -0,0 +1,29 @@
+{
+ "package": {
+ "name": "${TRAVIS_OS_NAME}",
+ "repo": "${TRAVIS_BRANCH}",
+ "subject": "docker-compose",
+ "desc": "Automated build of master branch from travis ci.",
+ "website_url": "https://github.com/docker/compose",
+ "issue_tracker_url": "https://github.com/docker/compose/issues",
+ "vcs_url": "https://github.com/docker/compose.git",
+ "licenses": ["Apache-2.0"]
+ },
+
+ "version": {
+ "name": "${TRAVIS_BRANCH}",
+ "desc": "Automated build of the ${TRAVIS_BRANCH} branch.",
+ "released": "${DATE}",
+ "vcs_tag": "master"
+ },
+
+ "files": [
+ {
+ "includePattern": "dist/(.*)",
+      "excludePattern": ".*\\.tar.gz",
+ "uploadPattern": "$1",
+ "matrixParams": { "override": 1 }
+ }
+ ],
+ "publish": true
+}
diff --git a/script/travis/build-binary b/script/travis/build-binary
new file mode 100755
index 00000000..7707a1ee
--- /dev/null
+++ b/script/travis/build-binary
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -ex
+
+if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+ script/build/linux
+ # TODO: requires auth to push, so disable for now
+ # script/build/image master
+ # docker push docker/compose:master
+else
+ script/setup/osx
+ script/build/osx
+fi
diff --git a/script/travis/ci b/script/travis/ci
new file mode 100755
index 00000000..cd4fcc6d
--- /dev/null
+++ b/script/travis/ci
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+ tox -e py27,py34 -- tests/unit
+else
+ # TODO: we could also install py34 and test against it
+ tox -e py27 -- tests/unit
+fi
diff --git a/script/travis/install b/script/travis/install
new file mode 100755
index 00000000..d4b34786
--- /dev/null
+++ b/script/travis/install
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+ pip install tox==2.1.1
+else
+ sudo pip install --upgrade pip tox==2.1.1 virtualenv
+ pip --version
+fi
diff --git a/script/travis/render-bintray-config.py b/script/travis/render-bintray-config.py
new file mode 100755
index 00000000..b5364a0b
--- /dev/null
+++ b/script/travis/render-bintray-config.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
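+# Render a bintray config template by expanding environment variables, e.g.:
+#   ./script/travis/render-bintray-config.py < script/travis/bintray.json.tmpl > bintray.json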
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import datetime
+import os.path
+import sys
+
+os.environ['DATE'] = str(datetime.date.today())
+
+for line in sys.stdin:
+ print(os.path.expandvars(line), end='')
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..3c6e79cf
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..192a0f6a
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import codecs
+import os
+import re
+import sys
+
+import pkg_resources
+from setuptools import find_packages
+from setuptools import setup
+
+
+def read(*parts):
+ path = os.path.join(os.path.dirname(__file__), *parts)
+ with codecs.open(path, encoding='utf-8') as fobj:
+ return fobj.read()
+
+
+def find_version(*file_paths):
+ version_file = read(*file_paths)
+ version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
+ version_file, re.M)
+ if version_match:
+ return version_match.group(1)
+ raise RuntimeError("Unable to find version string.")
+
+
+install_requires = [
+ 'cached-property >= 1.2.0, < 2',
+ 'docopt >= 0.6.1, < 0.7',
+ 'PyYAML >= 3.10, < 4',
+ 'requests >= 2.6.1, != 2.11.0, < 2.12',
+ 'texttable >= 0.9.0, < 0.10',
+ 'websocket-client >= 0.32.0, < 1.0',
+ 'docker >= 2.5.1, < 3.0',
+ 'dockerpty >= 0.4.1, < 0.5',
+ 'six >= 1.3.0, < 2',
+ 'jsonschema >= 2.5.1, < 3',
+]
+
+
+tests_require = [
+ 'pytest',
+]
+
+
+if sys.version_info[:2] < (3, 4):
+ tests_require.append('mock >= 1.0.1')
+
+extras_require = {
+ ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
+ ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
+ ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
+ ':sys_platform == "win32"': ['colorama >= 0.3.7, < 0.4'],
+ 'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
+}
+
+
+try:
+ if 'bdist_wheel' not in sys.argv:
+ for key, value in extras_require.items():
+ if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]):
+ install_requires.extend(value)
+except Exception as e:
+ print("Failed to compute platform dependencies: {}. ".format(e) +
+ "All dependencies will be installed as a result.", file=sys.stderr)
+ for key, value in extras_require.items():
+ if key.startswith(':'):
+ install_requires.extend(value)
+
+
+setup(
+ name='docker-compose',
+ version=find_version("compose", "__init__.py"),
+ description='Multi-container orchestration for Docker',
+ url='https://www.docker.com/',
+ author='Docker, Inc.',
+ license='Apache License 2.0',
+ packages=find_packages(exclude=['tests.*', 'tests']),
+ include_package_data=True,
+ test_suite='nose.collector',
+ install_requires=install_requires,
+ extras_require=extras_require,
+ tests_require=tests_require,
+ entry_points="""
+ [console_scripts]
+ docker-compose=compose.cli.main:main
+ """,
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ ],
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..1ac1b21c
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,14 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import sys
+
+if sys.version_info >= (2, 7):
+ import unittest # NOQA
+else:
+ import unittest2 as unittest # NOQA
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock # NOQA
diff --git a/tests/acceptance/__init__.py b/tests/acceptance/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/acceptance/__init__.py
diff --git a/tests/acceptance/cli_test.py b/tests/acceptance/cli_test.py
new file mode 100644
index 00000000..bba2238e
--- /dev/null
+++ b/tests/acceptance/cli_test.py
@@ -0,0 +1,2390 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import datetime
+import json
+import os
+import os.path
+import re
+import signal
+import subprocess
+import time
+from collections import Counter
+from collections import namedtuple
+from operator import attrgetter
+
+import pytest
+import six
+import yaml
+from docker import errors
+
+from .. import mock
+from ..helpers import create_host_file
+from compose.cli.command import get_project
+from compose.config.errors import DuplicateOverrideFileFound
+from compose.container import Container
+from compose.project import OneOffFilter
+from compose.utils import nanoseconds_from_time_seconds
+from tests.integration.testcases import DockerClientTestCase
+from tests.integration.testcases import get_links
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import pull_busybox
+from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
+
+
+BUILD_CACHE_TEXT = 'Using cache'
+BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
+
+
+def start_process(base_dir, options):
+ proc = subprocess.Popen(
+ ['docker-compose'] + options,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=base_dir)
+ print("Running process: %s" % proc.pid)
+ return proc
+
+
+def wait_on_process(proc, returncode=0):
+ stdout, stderr = proc.communicate()
+ if proc.returncode != returncode:
+ print("Stderr: {}".format(stderr))
+ print("Stdout: {}".format(stdout))
+ assert proc.returncode == returncode
+ return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
+
+
+def wait_on_condition(condition, delay=0.1, timeout=40):
+ start_time = time.time()
+ while not condition():
+ if time.time() - start_time > timeout:
+ raise AssertionError("Timeout: %s" % condition)
+ time.sleep(delay)
+
+
+def kill_service(service):
+ for container in service.containers():
+ if container.is_running:
+ container.kill()
+
+
+class ContainerCountCondition(object):
+
+ def __init__(self, project, expected):
+ self.project = project
+ self.expected = expected
+
+ def __call__(self):
+ return len([c for c in self.project.containers() if c.is_running]) == self.expected
+
+ def __str__(self):
+        return "waiting for container count == %s" % self.expected
+
+
+class ContainerStateCondition(object):
+
+ def __init__(self, client, name, status):
+ self.client = client
+ self.name = name
+ self.status = status
+
+ def __call__(self):
+ try:
+ container = self.client.inspect_container(self.name)
+ return container['State']['Status'] == self.status
+ except errors.APIError:
+ return False
+
+ def __str__(self):
+ return "waiting for container to be %s" % self.status
+
+
+class CLITestCase(DockerClientTestCase):
+
+ def setUp(self):
+ super(CLITestCase, self).setUp()
+ self.base_dir = 'tests/fixtures/simple-composefile'
+ self.override_dir = None
+
+ def tearDown(self):
+ if self.base_dir:
+ self.project.kill()
+ self.project.down(None, True)
+
+ for container in self.project.containers(stopped=True, one_off=OneOffFilter.only):
+ container.remove(force=True)
+ networks = self.client.networks()
+ for n in networks:
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
+ self.client.remove_network(n['Name'])
+ volumes = self.client.volumes().get('Volumes') or []
+ for v in volumes:
+ if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
+ self.client.remove_volume(v['Name'])
+ if hasattr(self, '_project'):
+ del self._project
+
+ super(CLITestCase, self).tearDown()
+
+ @property
+ def project(self):
+ # Hack: allow project to be overridden
+ if not hasattr(self, '_project'):
+ self._project = get_project(self.base_dir, override_dir=self.override_dir)
+ return self._project
+
+ def dispatch(self, options, project_options=None, returncode=0):
+ project_options = project_options or []
+ proc = start_process(self.base_dir, project_options + options)
+ return wait_on_process(proc, returncode=returncode)
+
+ def execute(self, container, cmd):
+ # Remove once Hijack and CloseNotifier sign a peace treaty
+ self.client.close()
+ exc = self.client.exec_create(container.id, cmd)
+ self.client.exec_start(exc)
+ return self.client.exec_inspect(exc)['ExitCode']
+
+ def lookup(self, container, hostname):
+ return self.execute(container, ["nslookup", hostname]) == 0
+
+ def test_help(self):
+ self.base_dir = 'tests/fixtures/no-composefile'
+ result = self.dispatch(['help', 'up'], returncode=0)
+ assert 'Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]' in result.stdout
+ # Prevent tearDown from trying to create a project
+ self.base_dir = None
+
+ def test_help_nonexistent(self):
+ self.base_dir = 'tests/fixtures/no-composefile'
+ result = self.dispatch(['help', 'foobar'], returncode=1)
+ assert 'No such command' in result.stderr
+ self.base_dir = None
+
+ def test_shorthand_host_opt(self):
+ self.dispatch(
+ ['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
+ 'up', '-d'],
+ returncode=0
+ )
+
+ def test_host_not_reachable(self):
+ result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
+ assert "Couldn't connect to Docker daemon" in result.stderr
+
+ def test_host_not_reachable_volumes_from_container(self):
+ self.base_dir = 'tests/fixtures/volumes-from-container'
+
+ container = self.client.create_container(
+ 'busybox', 'true', name='composetest_data_container',
+ host_config={}
+ )
+ self.addCleanup(self.client.remove_container, container)
+
+ result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
+ assert "Couldn't connect to Docker daemon" in result.stderr
+
+ def test_config_list_services(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config', '--services'])
+ assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}
+
+ def test_config_list_volumes(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config', '--volumes'])
+ assert set(result.stdout.rstrip().split('\n')) == {'data'}
+
+ def test_config_quiet_with_error(self):
+ self.base_dir = None
+ result = self.dispatch([
+ '-f', 'tests/fixtures/invalid-composefile/invalid.yml',
+ 'config', '-q'
+ ], returncode=1)
+ assert "'notaservice' must be a mapping" in result.stderr
+
+ def test_config_quiet(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ assert self.dispatch(['config', '-q']).stdout == ''
+
+ def test_config_default(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config'])
+ # assert there are no python objects encoded in the output
+ assert '!!' not in result.stdout
+
+ output = yaml.load(result.stdout)
+ expected = {
+ 'version': '2.0',
+ 'volumes': {'data': {'driver': 'local'}},
+ 'networks': {'front': {}},
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': os.path.abspath(self.base_dir),
+ },
+ 'networks': {'front': None, 'default': None},
+ 'volumes_from': ['service:other:rw'],
+ },
+ 'other': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'volumes': ['/data'],
+ },
+ },
+ }
+ assert output == expected
+
+ def test_config_restart(self):
+ self.base_dir = 'tests/fixtures/restart'
+ result = self.dispatch(['config'])
+ assert yaml.load(result.stdout) == {
+ 'version': '2.0',
+ 'services': {
+ 'never': {
+ 'image': 'busybox',
+ 'restart': 'no',
+ },
+ 'always': {
+ 'image': 'busybox',
+ 'restart': 'always',
+ },
+ 'on-failure': {
+ 'image': 'busybox',
+ 'restart': 'on-failure',
+ },
+ 'on-failure-5': {
+ 'image': 'busybox',
+ 'restart': 'on-failure:5',
+ },
+ 'restart-null': {
+ 'image': 'busybox',
+ 'restart': ''
+ },
+ },
+ }
+
+ def test_config_external_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+ result = self.dispatch(['-f', 'external-networks.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'networks' in json_result
+ assert json_result['networks'] == {
+ 'networks_foo': {
+ 'external': True # {'name': 'networks_foo'}
+ },
+ 'bar': {
+ 'external': {'name': 'networks_bar'}
+ }
+ }
+
+ def test_config_external_volume_v2(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ },
+ 'bar': {
+ 'external': {
+ 'name': 'some_bar',
+ },
+ }
+ }
+
+ def test_config_external_volume_v2_x(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ }
+ }
+
+ def test_config_external_volume_v3_x(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ },
+ 'bar': {
+ 'external': {
+ 'name': 'some_bar',
+ },
+ }
+ }
+
+ def test_config_external_volume_v3_4(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ }
+ }
+
+ def test_config_v1(self):
+ self.base_dir = 'tests/fixtures/v1-config'
+ result = self.dispatch(['config'])
+ assert yaml.load(result.stdout) == {
+ 'version': '2.1',
+ 'services': {
+ 'net': {
+ 'image': 'busybox',
+ 'network_mode': 'bridge',
+ },
+ 'volume': {
+ 'image': 'busybox',
+ 'volumes': ['/data'],
+ 'network_mode': 'bridge',
+ },
+ 'app': {
+ 'image': 'busybox',
+ 'volumes_from': ['service:volume:rw'],
+ 'network_mode': 'service:net',
+ },
+ },
+ }
+
+ @v3_only()
+ def test_config_v3(self):
+ self.base_dir = 'tests/fixtures/v3-full'
+ result = self.dispatch(['config'])
+
+ assert yaml.load(result.stdout) == {
+ 'version': '3.2',
+ 'volumes': {
+ 'foobar': {
+ 'labels': {
+ 'com.docker.compose.test': 'true',
+ },
+ },
+ },
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'deploy': {
+ 'mode': 'replicated',
+ 'replicas': 6,
+ 'labels': ['FOO=BAR'],
+ 'update_config': {
+ 'parallelism': 3,
+ 'delay': '10s',
+ 'failure_action': 'continue',
+ 'monitor': '60s',
+ 'max_failure_ratio': 0.3,
+ },
+ 'resources': {
+ 'limits': {
+ 'cpus': '0.001',
+ 'memory': '50M',
+ },
+ 'reservations': {
+ 'cpus': '0.0001',
+ 'memory': '20M',
+ },
+ },
+ 'restart_policy': {
+ 'condition': 'on_failure',
+ 'delay': '5s',
+ 'max_attempts': 3,
+ 'window': '120s',
+ },
+ 'placement': {
+ 'constraints': ['node=foo'],
+ },
+ },
+
+ 'healthcheck': {
+ 'test': 'cat /etc/passwd',
+ 'interval': '10s',
+ 'timeout': '1s',
+ 'retries': 5,
+ },
+ 'volumes': [
+ '/host/path:/container/path:ro',
+ 'foobar:/container/volumepath:rw',
+ '/anonymous',
+ 'foobar:/container/volumepath2:nocopy'
+ ],
+
+ 'stop_grace_period': '20s',
+ },
+ },
+ }
+
+ def test_ps(self):
+ self.project.get_service('simple').create_container()
+ result = self.dispatch(['ps'])
+ assert 'simplecomposefile_simple_1' in result.stdout
+
+ def test_ps_default_composefile(self):
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['ps'])
+
+ self.assertIn('multiplecomposefiles_simple_1', result.stdout)
+ self.assertIn('multiplecomposefiles_another_1', result.stdout)
+ self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
+
+ def test_ps_alternate_composefile(self):
+ config_path = os.path.abspath(
+ 'tests/fixtures/multiple-composefiles/compose2.yml')
+ self._project = get_project(self.base_dir, [config_path])
+
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
+ result = self.dispatch(['-f', 'compose2.yml', 'ps'])
+
+ self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
+ self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
+ self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
+
+ def test_pull(self):
+ result = self.dispatch(['pull'])
+ assert sorted(result.stderr.split('\n'))[1:] == [
+ 'Pulling another (busybox:latest)...',
+ 'Pulling simple (busybox:latest)...',
+ ]
+
+ def test_pull_with_digest(self):
+ result = self.dispatch(['-f', 'digest.yml', 'pull'])
+
+ assert 'Pulling simple (busybox:latest)...' in result.stderr
+ assert ('Pulling digest (busybox@'
+ 'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
+ '04ee8502d)...') in result.stderr
+
+ def test_pull_with_ignore_pull_failures(self):
+ result = self.dispatch([
+ '-f', 'ignore-pull-failures.yml',
+ 'pull', '--ignore-pull-failures']
+ )
+
+ assert 'Pulling simple (busybox:latest)...' in result.stderr
+ assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
+ assert ('repository nonexisting-image not found' in result.stderr or
+ 'image library/nonexisting-image:latest not found' in result.stderr or
+ 'pull access denied for nonexisting-image' in result.stderr)
+
+ def test_pull_with_parallel_failure(self):
+ result = self.dispatch([
+ '-f', 'ignore-pull-failures.yml', 'pull', '--parallel'],
+ returncode=1
+ )
+
+ self.assertRegexpMatches(result.stderr, re.compile('^Pulling simple', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr, re.compile('^Pulling another', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr,
+ re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr,
+ re.compile('''^(ERROR: )?(b')?.* nonexisting-image''',
+ re.MULTILINE))
+
+ def test_pull_with_quiet(self):
+ assert self.dispatch(['pull', '--quiet']).stderr == ''
+ assert self.dispatch(['pull', '--quiet']).stdout == ''
+
+ def test_build_plain(self):
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'])
+
+ result = self.dispatch(['build', 'simple'])
+ assert BUILD_PULL_TEXT not in result.stdout
+
+ def test_build_no_cache(self):
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'])
+
+ result = self.dispatch(['build', '--no-cache', 'simple'])
+ assert BUILD_CACHE_TEXT not in result.stdout
+ assert BUILD_PULL_TEXT not in result.stdout
+
+ def test_build_pull(self):
+ # Make sure we have the latest busybox already
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'], None)
+
+ result = self.dispatch(['build', '--pull', 'simple'])
+ if not is_cluster(self.client):
+ # If previous build happened on another node, cache won't be available
+ assert BUILD_CACHE_TEXT in result.stdout
+ assert BUILD_PULL_TEXT in result.stdout
+
+ def test_build_no_cache_pull(self):
+ # Make sure we have the latest busybox already
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', 'simple'])
+
+ result = self.dispatch(['build', '--no-cache', '--pull', 'simple'])
+ assert BUILD_CACHE_TEXT not in result.stdout
+ assert BUILD_PULL_TEXT in result.stdout
+
+ @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
+ def test_build_failed(self):
+ self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
+ self.dispatch(['build', 'simple'], returncode=1)
+
+ labels = ["com.docker.compose.test_failing_image=true"]
+ containers = [
+ Container.from_ps(self.project.client, c)
+ for c in self.project.client.containers(
+ all=True,
+ filters={"label": labels})
+ ]
+ assert len(containers) == 1
+
+ @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
+ def test_build_failed_forcerm(self):
+ self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
+ self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
+
+ labels = ["com.docker.compose.test_failing_image=true"]
+
+ containers = [
+ Container.from_ps(self.project.client, c)
+ for c in self.project.client.containers(
+ all=True,
+ filters={"label": labels})
+ ]
+ assert not containers
+
+ def test_build_shm_size_build_option(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-shm-size'
+ result = self.dispatch(['build', '--no-cache'], None)
+ assert 'shm_size: 96' in result.stdout
+
+ def test_bundle_with_digests(self):
+ self.base_dir = 'tests/fixtures/bundle-with-digests/'
+ tmpdir = pytest.ensuretemp('cli_test_bundle')
+ self.addCleanup(tmpdir.remove)
+ filename = str(tmpdir.join('example.dab'))
+
+ self.dispatch(['bundle', '--output', filename])
+ with open(filename, 'r') as fh:
+ bundle = json.load(fh)
+
+ assert bundle == {
+ 'Version': '0.1',
+ 'Services': {
+ 'web': {
+ 'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
+ '44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
+ 'Networks': ['default'],
+ },
+ 'redis': {
+ 'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
+ '374b2b7392de1e7d77be26ef8f7b'),
+ 'Networks': ['default'],
+ }
+ },
+ }
+
+ def test_build_override_dir(self):
+ self.base_dir = 'tests/fixtures/build-path-override-dir'
+ self.override_dir = os.path.abspath('tests/fixtures')
+ result = self.dispatch([
+ '--project-directory', self.override_dir,
+ 'build'])
+
+ assert 'Successfully built' in result.stdout
+
+ def test_build_override_dir_invalid_path(self):
+ config_path = os.path.abspath('tests/fixtures/build-path-override-dir/docker-compose.yml')
+ result = self.dispatch([
+ '-f', config_path,
+ 'build'], returncode=1)
+
+ assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr
+
+ def test_create(self):
+ self.dispatch(['create'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ service_containers = service.containers(stopped=True)
+ another_containers = another.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert len(another_containers) == 1
+ assert not service_containers[0].is_running
+ assert not another_containers[0].is_running
+
+ def test_create_with_force_recreate(self):
+ self.dispatch(['create'], None)
+ service = self.project.get_service('simple')
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ old_ids = [c.id for c in service.containers(stopped=True)]
+
+ self.dispatch(['create', '--force-recreate'], None)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ new_ids = [c.id for c in service_containers]
+
+ assert old_ids != new_ids
+
+ def test_create_with_no_recreate(self):
+ self.dispatch(['create'], None)
+ service = self.project.get_service('simple')
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ old_ids = [c.id for c in service.containers(stopped=True)]
+
+ self.dispatch(['create', '--no-recreate'], None)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ new_ids = [c.id for c in service_containers]
+
+ assert old_ids == new_ids
+
+ def test_run_one_off_with_volume(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ service = self.project.get_service('simple')
+ container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ mount = container_data.get('Mounts')[0]
+ assert mount['Source'] == volume_path
+ assert mount['Destination'] == '/data'
+ assert mount['Type'] == 'bind'
+
+ def test_run_one_off_with_multiple_volumes(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-v', '{}:/data1'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-v', '{}:/data1'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f' '/data1/example.txt'
+ ], returncode=0)
+
+ def test_run_one_off_with_volume_merge(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ '-f', 'docker-compose.merge.yml',
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ service = self.project.get_service('simple')
+ container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ mounts = container_data.get('Mounts')
+ assert len(mounts) == 2
+ config_mount = [m for m in mounts if m['Destination'] == '/data1'][0]
+ override_mount = [m for m in mounts if m['Destination'] == '/data'][0]
+
+ assert config_mount['Type'] == 'volume'
+ assert override_mount['Source'] == volume_path
+ assert override_mount['Type'] == 'bind'
+
+ def test_create_with_force_recreate_and_no_recreate(self):
+ self.dispatch(
+ ['create', '--force-recreate', '--no-recreate'],
+ returncode=1)
+
+ def test_down_invalid_rmi_flag(self):
+ result = self.dispatch(['down', '--rmi', 'bogus'], returncode=1)
+ assert '--rmi flag must be' in result.stderr
+
+ @v2_only()
+ def test_down(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+
+ self.dispatch(['up', '-d'])
+ wait_on_condition(ContainerCountCondition(self.project, 2))
+
+ self.dispatch(['run', 'web', 'true'])
+ self.dispatch(['run', '-d', 'web', 'tail', '-f', '/dev/null'])
+ assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2
+
+ result = self.dispatch(['down', '--rmi=local', '--volumes'])
+ assert 'Stopping v2full_web_1' in result.stderr
+ assert 'Stopping v2full_other_1' in result.stderr
+ assert 'Stopping v2full_web_run_2' in result.stderr
+ assert 'Removing v2full_web_1' in result.stderr
+ assert 'Removing v2full_other_1' in result.stderr
+ assert 'Removing v2full_web_run_1' in result.stderr
+ assert 'Removing v2full_web_run_2' in result.stderr
+ assert 'Removing volume v2full_data' in result.stderr
+ assert 'Removing image v2full_web' in result.stderr
+ assert 'Removing image busybox' not in result.stderr
+ assert 'Removing network v2full_default' in result.stderr
+ assert 'Removing network v2full_front' in result.stderr
+
+ def test_up_detached(self):
+ self.dispatch(['up', '-d'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertEqual(len(another.containers()), 1)
+
+ # Ensure containers don't have stdin, stdout or stderr attached in -d mode
+ container, = service.containers()
+ self.assertFalse(container.get('Config.AttachStderr'))
+ self.assertFalse(container.get('Config.AttachStdout'))
+ self.assertFalse(container.get('Config.AttachStdin'))
+
+ def test_up_attached(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ result = self.dispatch(['up', '--no-color'])
+
+ assert 'simple_1 | simple' in result.stdout
+ assert 'another_1 | another' in result.stdout
+ assert 'simple_1 exited with code 0' in result.stdout
+ assert 'another_1 exited with code 0' in result.stdout
+
+ @v2_only()
+ def test_up(self):
+ self.base_dir = 'tests/fixtures/v2-simple'
+ self.dispatch(['up', '-d'], None)
+
+ services = self.project.get_services()
+
+ network_name = self.project.networks.networks['default'].full_name
+ networks = self.client.networks(names=[network_name])
+ self.assertEqual(len(networks), 1)
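+ # Clustered engines (Swarm) create overlay networks; a single engine uses bridge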
+ assert networks[0]['Driver'] == ('bridge' if not is_cluster(self.client) else 'overlay')
+ assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
+
+ network = self.client.inspect_network(networks[0]['Id'])
+
+ for service in services:
+ containers = service.containers()
+ self.assertEqual(len(containers), 1)
+
+ container = containers[0]
+ self.assertIn(container.id, network['Containers'])
+
+ networks = container.get('NetworkSettings.Networks')
+ self.assertEqual(list(networks), [network['Name']])
+
+ self.assertEqual(
+ sorted(networks[network['Name']]['Aliases']),
+ sorted([service.name, container.short_id]))
+
+ for service in services:
+ assert self.lookup(container, service.name)
+
+ @v2_only()
+ def test_up_no_start(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ self.dispatch(['up', '--no-start'], None)
+
+ services = self.project.get_services()
+
+ default_network = self.project.networks.networks['default'].full_name
+ front_network = self.project.networks.networks['front'].full_name
+ networks = self.client.networks(names=[default_network, front_network])
+ assert len(networks) == 2
+
+ for service in services:
+ containers = service.containers(stopped=True)
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert not container.is_running
+ assert container.get('State.Status') == 'created'
+
+ volumes = self.project.volumes.volumes
+ assert 'data' in volumes
+ volume = volumes['data']
+
+ # The code below is a Swarm-compatible equivalent to volume.exists()
+ remote_volumes = [
+ v for v in self.client.volumes().get('Volumes', [])
+ if v['Name'].split('/')[-1] == volume.full_name
+ ]
+ assert len(remote_volumes) > 0
+
+ @v2_only()
+ def test_up_no_ansi(self):
+ self.base_dir = 'tests/fixtures/v2-simple'
+ result = self.dispatch(['--no-ansi', 'up', '-d'], None)
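+ # chr(27) is ESC: '[2K' erases the line, '[1A'/'[1B' move the cursor up/down;
+ # none of these ANSI control sequences should appear with --no-ansi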
+ assert "%c[2K\r" % 27 not in result.stderr
+ assert "%c[1A" % 27 not in result.stderr
+ assert "%c[1B" % 27 not in result.stderr
+
+ @v2_only()
+ def test_up_with_default_network_config(self):
+ filename = 'default-network-config.yml'
+
+ self.base_dir = 'tests/fixtures/networks'
+ self._project = get_project(self.base_dir, [filename])
+
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+
+ network_name = self.project.networks.networks['default'].full_name
+ networks = self.client.networks(names=[network_name])
+
+ assert networks[0]['Options']['com.docker.network.bridge.enable_icc'] == 'false'
+
+ @v2_only()
+ def test_up_with_network_aliases(self):
+ filename = 'network-aliases.yml'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+ back_name = '{}_back'.format(self.project.name)
+ front_name = '{}_front'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # Two networks were created: back and front
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
+ web_container = self.project.get_service('web').containers()[0]
+
+ back_aliases = web_container.get(
+ 'NetworkSettings.Networks.{}.Aliases'.format(back_name)
+ )
+ assert 'web' in back_aliases
+ front_aliases = web_container.get(
+ 'NetworkSettings.Networks.{}.Aliases'.format(front_name)
+ )
+ assert 'web' in front_aliases
+ assert 'forward_facing' in front_aliases
+ assert 'ahead' in front_aliases
+
+ @v2_only()
+ def test_up_with_network_internal(self):
+ self.require_api_version('1.23')
+ filename = 'network-internal.yml'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+ internal_net = '{}_internal'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # One network was created: internal
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [internal_net]
+
+ assert networks[0]['Internal'] is True
+
+ @v2_only()
+ def test_up_with_network_static_addresses(self):
+ filename = 'network-static-addresses.yml'
+ ipv4_address = '172.16.100.100'
+ ipv6_address = 'fe80::1001:100'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+ static_net = '{}_static_test'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # One network was created: static_test
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [static_net]
+ web_container = self.project.get_service('web').containers()[0]
+
+ ipam_config = web_container.get(
+ 'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net)
+ )
+ assert ipv4_address in ipam_config.values()
+ assert ipv6_address in ipam_config.values()
+
+ @v2_only()
+ def test_up_with_networks(self):
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['up', '-d'], None)
+
+ back_name = '{}_back'.format(self.project.name)
+ front_name = '{}_front'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # Two networks were created: back and front
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
+
+ # lookup by ID instead of name in case of duplicates
+ back_network = self.client.inspect_network(
+ [n for n in networks if n['Name'] == back_name][0]['Id']
+ )
+ front_network = self.client.inspect_network(
+ [n for n in networks if n['Name'] == front_name][0]['Id']
+ )
+
+ web_container = self.project.get_service('web').containers()[0]
+ app_container = self.project.get_service('app').containers()[0]
+ db_container = self.project.get_service('db').containers()[0]
+
+ for net_name in [front_name, back_name]:
+ links = app_container.get('NetworkSettings.Networks.{}.Links'.format(net_name))
+ assert '{}:database'.format(db_container.name) in links
+
+ # db and app joined the back network
+ assert sorted(back_network['Containers']) == sorted([db_container.id, app_container.id])
+
+ # web and app joined the front network
+ assert sorted(front_network['Containers']) == sorted([web_container.id, app_container.id])
+
+ # web can see app but not db
+ assert self.lookup(web_container, "app")
+ assert not self.lookup(web_container, "db")
+
+ # app can see db
+ assert self.lookup(app_container, "db")
+
+ # app has aliased db to "database"
+ assert self.lookup(app_container, "database")
+
+ @v2_only()
+ def test_up_missing_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+
+ result = self.dispatch(
+ ['-f', 'missing-network.yml', 'up', '-d'],
+ returncode=1)
+
+ assert 'Service "web" uses an undefined network "foo"' in result.stderr
+
+ @v2_only()
+ @no_cluster('container networks not supported in Swarm')
+ def test_up_with_network_mode(self):
+ c = self.client.create_container(
+ 'busybox', 'top', name='composetest_network_mode_container',
+ host_config={}
+ )
+ self.addCleanup(self.client.remove_container, c, force=True)
+ self.client.start(c)
+ container_mode_source = 'container:{}'.format(c['Id'])
+
+ filename = 'network-mode.yml'
+
+ self.base_dir = 'tests/fixtures/networks'
+ self._project = get_project(self.base_dir, [filename])
+
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+ assert not networks
+
+ for name in ['bridge', 'host', 'none']:
+ container = self.project.get_service(name).containers()[0]
+ assert list(container.get('NetworkSettings.Networks')) == [name]
+ assert container.get('HostConfig.NetworkMode') == name
+
+ service_mode_source = 'container:{}'.format(
+ self.project.get_service('bridge').containers()[0].id)
+ service_mode_container = self.project.get_service('service').containers()[0]
+ assert not service_mode_container.get('NetworkSettings.Networks')
+ assert service_mode_container.get('HostConfig.NetworkMode') == service_mode_source
+
+ container_mode_container = self.project.get_service('container').containers()[0]
+ assert not container_mode_container.get('NetworkSettings.Networks')
+ assert container_mode_container.get('HostConfig.NetworkMode') == container_mode_source
+
+ @v2_only()
+ def test_up_external_networks(self):
+ filename = 'external-networks.yml'
+
+ self.base_dir = 'tests/fixtures/networks'
+ self._project = get_project(self.base_dir, [filename])
+
+ result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
+ assert 'declared as external, but could not be found' in result.stderr
+
+ networks = [
+ n['Name'] for n in self.client.networks()
+ if n['Name'].startswith('{}_'.format(self.project.name))
+ ]
+ assert not networks
+
+ network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']]
+ for name in network_names:
+ self.client.create_network(name, attachable=True)
+
+ self.dispatch(['-f', filename, 'up', '-d'])
+ container = self.project.containers()[0]
+ assert sorted(list(container.get('NetworkSettings.Networks'))) == sorted(network_names)
+
+ @v2_only()
+ def test_up_with_external_default_network(self):
+ filename = 'external-default.yml'
+
+ self.base_dir = 'tests/fixtures/networks'
+ self._project = get_project(self.base_dir, [filename])
+
+ result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
+ assert 'declared as external, but could not be found' in result.stderr
+
+ networks = [
+ n['Name'] for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+ assert not networks
+
+ network_name = 'composetest_external_network'
+ self.client.create_network(network_name, attachable=True)
+
+ self.dispatch(['-f', filename, 'up', '-d'])
+ container = self.project.containers()[0]
+ assert list(container.get('NetworkSettings.Networks')) == [network_name]
+
+ @v2_1_only()
+ def test_up_with_network_labels(self):
+ filename = 'network-label.yml'
+
+ self.base_dir = 'tests/fixtures/networks'
+ self._project = get_project(self.base_dir, [filename])
+
+ self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
+
+ network_with_label = '{}_network_with_label'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ assert [n['Name'].split('/')[-1] for n in networks] == [network_with_label]
+ assert 'label_key' in networks[0]['Labels']
+ assert networks[0]['Labels']['label_key'] == 'label_val'
+
+ @v2_1_only()
+ def test_up_with_volume_labels(self):
+ filename = 'volume-label.yml'
+
+ self.base_dir = 'tests/fixtures/volumes'
+ self._project = get_project(self.base_dir, [filename])
+
+ self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
+
+ volume_with_label = '{}_volume_with_label'.format(self.project.name)
+
+ volumes = [
+ v for v in self.client.volumes().get('Volumes', [])
+ if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
+ assert 'label_key' in volumes[0]['Labels']
+ assert volumes[0]['Labels']['label_key'] == 'label_val'
+
+ @v2_only()
+ def test_up_no_services(self):
+ self.base_dir = 'tests/fixtures/no-services'
+ self.dispatch(['up', '-d'], None)
+
+ network_names = [
+ n['Name'] for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+ assert network_names == []
+
+ def test_up_with_links_v1(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'web'], None)
+
+ # No network was created
+ network_name = self.project.networks.networks['default'].full_name
+ networks = self.client.networks(names=[network_name])
+ assert networks == []
+
+ web = self.project.get_service('web')
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+
+ # console was not started
+ self.assertEqual(len(web.containers()), 1)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ # web has links
+ web_container = web.containers()[0]
+ self.assertTrue(web_container.get('HostConfig.Links'))
+
+ def test_up_with_net_is_invalid(self):
+ self.base_dir = 'tests/fixtures/net-container'
+
+ result = self.dispatch(
+ ['-f', 'v2-invalid.yml', 'up', '-d'],
+ returncode=1)
+
+ assert "Unsupported config option for services.bar: 'net'" in result.stderr
+
+ @no_cluster("Legacy networking not supported on Swarm")
+ def test_up_with_net_v1(self):
+ self.base_dir = 'tests/fixtures/net-container'
+ self.dispatch(['up', '-d'], None)
+
+ bar = self.project.get_service('bar')
+ bar_container = bar.containers()[0]
+
+ foo = self.project.get_service('foo')
+ foo_container = foo.containers()[0]
+
+ assert foo_container.get('HostConfig.NetworkMode') == \
+ 'container:{}'.format(bar_container.id)
+
+ @v3_only()
+ def test_up_with_healthcheck(self):
+ def wait_on_health_status(container, status):
+ def condition():
+ container.inspect()
+ return container.get('State.Health.Status') == status
+
+ return wait_on_condition(condition, delay=0.5)
+
+ self.base_dir = 'tests/fixtures/healthcheck'
+ self.dispatch(['up', '-d'], None)
+
+ passes = self.project.get_service('passes')
+ passes_container = passes.containers()[0]
+
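+ # The Docker API reports healthcheck durations in nanoseconds, hence the
+ # nanoseconds_from_time_seconds conversions below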
+ assert passes_container.get('Config.Healthcheck') == {
+ "Test": ["CMD-SHELL", "/bin/true"],
+ "Interval": nanoseconds_from_time_seconds(1),
+ "Timeout": nanoseconds_from_time_seconds(30 * 60),
+ "Retries": 1,
+ }
+
+ wait_on_health_status(passes_container, 'healthy')
+
+ fails = self.project.get_service('fails')
+ fails_container = fails.containers()[0]
+
+ assert fails_container.get('Config.Healthcheck') == {
+ "Test": ["CMD", "/bin/false"],
+ "Interval": nanoseconds_from_time_seconds(2.5),
+ "Retries": 2,
+ }
+
+ wait_on_health_status(fails_container, 'unhealthy')
+
+ disabled = self.project.get_service('disabled')
+ disabled_container = disabled.containers()[0]
+
+ assert disabled_container.get('Config.Healthcheck') == {
+ "Test": ["NONE"],
+ }
+
+ assert 'Health' not in disabled_container.get('State')
+
+ def test_up_with_no_deps(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', '--no-deps', 'web'], None)
+ web = self.project.get_service('web')
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ self.assertEqual(len(web.containers()), 1)
+ self.assertEqual(len(db.containers()), 0)
+ self.assertEqual(len(console.containers()), 0)
+
+ def test_up_with_force_recreate(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+
+ old_ids = [c.id for c in service.containers()]
+
+ self.dispatch(['up', '-d', '--force-recreate'], None)
+ self.assertEqual(len(service.containers()), 1)
+
+ new_ids = [c.id for c in service.containers()]
+
+ self.assertNotEqual(old_ids, new_ids)
+
+ def test_up_with_no_recreate(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+
+ old_ids = [c.id for c in service.containers()]
+
+ self.dispatch(['up', '-d', '--no-recreate'], None)
+ self.assertEqual(len(service.containers()), 1)
+
+ new_ids = [c.id for c in service.containers()]
+
+ self.assertEqual(old_ids, new_ids)
+
+ def test_up_with_force_recreate_and_no_recreate(self):
+ self.dispatch(
+ ['up', '-d', '--force-recreate', '--no-recreate'],
+ returncode=1)
+
+ def test_up_with_timeout(self):
+ self.dispatch(['up', '-d', '-t', '1'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertEqual(len(another.containers()), 1)
+
+ # Ensure containers don't have stdin, stdout or stderr attached in -d mode
+ config = service.containers()[0].inspect()['Config']
+ self.assertFalse(config['AttachStderr'])
+ self.assertFalse(config['AttachStdout'])
+ self.assertFalse(config['AttachStdin'])
+
+ def test_up_handles_sigint(self):
+ proc = start_process(self.base_dir, ['up', '-t', '2'])
+ wait_on_condition(ContainerCountCondition(self.project, 2))
+
+ os.kill(proc.pid, signal.SIGINT)
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+
+ def test_up_handles_sigterm(self):
+ proc = start_process(self.base_dir, ['up', '-t', '2'])
+ wait_on_condition(ContainerCountCondition(self.project, 2))
+
+ os.kill(proc.pid, signal.SIGTERM)
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+
+ @v2_only()
+ def test_up_handles_force_shutdown(self):
+ self.base_dir = 'tests/fixtures/sleeps-composefile'
+ proc = start_process(self.base_dir, ['up', '-t', '200'])
+ wait_on_condition(ContainerCountCondition(self.project, 2))
+
+ os.kill(proc.pid, signal.SIGTERM)
+ time.sleep(0.1)
+ os.kill(proc.pid, signal.SIGTERM)
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+
+ def test_up_handles_abort_on_container_exit(self):
+ self.base_dir = 'tests/fixtures/abort-on-container-exit-0'
+ proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+ proc.wait()
+ self.assertEqual(proc.returncode, 0)
+
+ def test_up_handles_abort_on_container_exit_code(self):
+ self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
+ proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+ proc.wait()
+ self.assertEqual(proc.returncode, 1)
+
+ @v2_only()
+ @no_cluster('Container PID mode does not work across clusters')
+ def test_up_with_pid_mode(self):
+ c = self.client.create_container(
+ 'busybox', 'top', name='composetest_pid_mode_container',
+ host_config={}
+ )
+ self.addCleanup(self.client.remove_container, c, force=True)
+ self.client.start(c)
+ container_mode_source = 'container:{}'.format(c['Id'])
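+ # 'container:<id>' makes the service share the PID namespace of that container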
+
+ self.base_dir = 'tests/fixtures/pid-mode'
+
+ self.dispatch(['up', '-d'], None)
+
+ service_mode_source = 'container:{}'.format(
+ self.project.get_service('container').containers()[0].id)
+ service_mode_container = self.project.get_service('service').containers()[0]
+ assert service_mode_container.get('HostConfig.PidMode') == service_mode_source
+
+ container_mode_container = self.project.get_service('container').containers()[0]
+ assert container_mode_container.get('HostConfig.PidMode') == container_mode_source
+
+ host_mode_container = self.project.get_service('host').containers()[0]
+ assert host_mode_container.get('HostConfig.PidMode') == 'host'
+
+ def test_exec_without_tty(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'console'])
+ self.assertEqual(len(self.project.containers()), 1)
+
+ stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
+ self.assertEqual(stderr, "")
+ self.assertEqual(stdout, "/\n")
+
+ def test_exec_custom_user(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'console'])
+ self.assertEqual(len(self.project.containers()), 1)
+
+ stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
+ self.assertEqual(stdout, "operator\n")
+ self.assertEqual(stderr, "")
+
+ def test_run_service_without_links(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['run', 'console', '/bin/true'])
+ self.assertEqual(len(self.project.containers()), 0)
+
+ # Ensure stdin, stdout and stderr were attached
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ config = container.inspect()['Config']
+ self.assertTrue(config['AttachStderr'])
+ self.assertTrue(config['AttachStdout'])
+ self.assertTrue(config['AttachStdin'])
+
+ def test_run_service_with_links(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ @v2_only()
+ def test_run_service_with_dependencies(self):
+ self.base_dir = 'tests/fixtures/v2-dependencies'
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ def test_run_service_with_scaled_dependencies(self):
+ self.base_dir = 'tests/fixtures/v2-dependencies'
+ self.dispatch(['up', '-d', '--scale', 'db=2', '--scale', 'console=0'])
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ assert len(db.containers()) == 2
+ assert len(console.containers()) == 0
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ assert len(db.containers()) == 2
+ assert len(console.containers()) == 0
+
+ def test_run_with_no_deps(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
+ db = self.project.get_service('db')
+ self.assertEqual(len(db.containers()), 0)
+
+ def test_run_does_not_recreate_linked_containers(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '-d', 'db'])
+ db = self.project.get_service('db')
+ self.assertEqual(len(db.containers()), 1)
+
+ old_ids = [c.id for c in db.containers()]
+
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ self.assertEqual(len(db.containers()), 1)
+
+ new_ids = [c.id for c in db.containers()]
+
+ self.assertEqual(old_ids, new_ids)
+
+ def test_run_without_command(self):
+ self.base_dir = 'tests/fixtures/commands-composefile'
+ self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
+
+ self.dispatch(['run', 'implicit'])
+ service = self.project.get_service('implicit')
+ containers = service.containers(stopped=True, one_off=OneOffFilter.only)
+ self.assertEqual(
+ [c.human_readable_command for c in containers],
+ [u'/bin/sh -c echo "success"'],
+ )
+
+ self.dispatch(['run', 'explicit'])
+ service = self.project.get_service('explicit')
+ containers = service.containers(stopped=True, one_off=OneOffFilter.only)
+ self.assertEqual(
+ [c.human_readable_command for c in containers],
+ [u'/bin/true'],
+ )
+
+ @pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
+ def test_run_rm(self):
+ self.base_dir = 'tests/fixtures/volume'
+ proc = start_process(self.base_dir, ['run', '--rm', 'test'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'volume_test_run_1',
+ 'running'))
+ service = self.project.get_service('test')
+ containers = service.containers(one_off=OneOffFilter.only)
+ self.assertEqual(len(containers), 1)
+ mounts = containers[0].get('Mounts')
+ for mount in mounts:
+ if mount['Destination'] == '/container-path':
+ anonymous_name = mount['Name']
+ break
+ os.kill(proc.pid, signal.SIGINT)
+ wait_on_process(proc, 1)
+
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
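+ # 'run --rm' should remove the anonymous volume but keep the named one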
+ volumes = self.client.volumes()['Volumes']
+ assert volumes is not None
+ for volume in service.options.get('volumes'):
+ if volume.internal == '/container-named-path':
+ name = volume.external
+ break
+ volume_names = [v['Name'].split('/')[-1] for v in volumes]
+ assert name in volume_names
+ assert anonymous_name not in volume_names
+
+ def test_run_service_with_dockerfile_entrypoint(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['printf']
+ assert container.get('Config.Cmd') == ['default', 'args']
+
+ def test_run_service_with_dockerfile_entrypoint_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert not container.get('Config.Cmd')
+
+ def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert container.get('Config.Cmd') == ['foo']
+
+ def test_run_service_with_compose_file_entrypoint(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['printf']
+ assert container.get('Config.Cmd') == ['default', 'args']
+
+ def test_run_service_with_compose_file_entrypoint_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert not container.get('Config.Cmd')
+
+ def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert container.get('Config.Cmd') == ['foo']
+
+ def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
+ self.base_dir = 'tests/fixtures/entrypoint-composefile'
+ self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') == ['echo']
+ assert container.get('Config.Cmd') == ['']
+
+ def test_run_service_with_user_overridden(self):
+ self.base_dir = 'tests/fixtures/user-composefile'
+ name = 'service'
+ user = 'sshd'
+ self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ self.assertEqual(user, container.get('Config.User'))
+
+ def test_run_service_with_user_overridden_short_form(self):
+ self.base_dir = 'tests/fixtures/user-composefile'
+ name = 'service'
+ user = 'sshd'
+ self.dispatch(['run', '-u', user, name], returncode=1)
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ self.assertEqual(user, container.get('Config.User'))
+
+ def test_run_service_with_environment_overridden(self):
+ name = 'service'
+ self.base_dir = 'tests/fixtures/environment-composefile'
+ self.dispatch([
+ 'run', '-e', 'foo=notbar',
+ '-e', 'allo=moto=bobo',
+ '-e', 'alpha=beta',
+ name,
+ '/bin/true',
+ ])
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ # env overridden
+ self.assertEqual('notbar', container.environment['foo'])
+ # keep environment from yaml
+ self.assertEqual('world', container.environment['hello'])
+ # added option from command line
+ self.assertEqual('beta', container.environment['alpha'])
+ # make sure a value containing '=' doesn't break parsing
+ self.assertEqual('moto=bobo', container.environment['allo'])
+
+ def test_run_service_without_map_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['run', '-d', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_random = container.get_local_port(3000)
+ port_assigned = container.get_local_port(3001)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ self.assertEqual(port_random, None)
+ self.assertEqual(port_assigned, None)
+
+ def test_run_service_with_map_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['run', '-d', '--service-ports', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_random = container.get_local_port(3000)
+ port_assigned = container.get_local_port(3001)
+ port_range = container.get_local_port(3002), container.get_local_port(3003)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ assert port_random is not None
+ assert port_assigned.endswith(':49152')
+ assert port_range[0].endswith(':49153')
+ assert port_range[1].endswith(':49154')
+
+ def test_run_service_with_explicitly_mapped_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_short = container.get_local_port(3000)
+ port_full = container.get_local_port(3001)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ assert port_short.endswith(':30000')
+ assert port_full.endswith(':30001')
+
+ def test_run_service_with_explicitly_mapped_ip_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch([
+ 'run', '-d',
+ '-p', '127.0.0.1:30000:3000',
+ '--publish', '127.0.0.1:30001:3001',
+ 'simple'
+ ])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ # get port information
+ port_short = container.get_local_port(3000)
+ port_full = container.get_local_port(3001)
+
+ # close all one off containers we just created
+ container.stop()
+
+ # check the ports
+ self.assertEqual(port_short, "127.0.0.1:30000")
+ self.assertEqual(port_full, "127.0.0.1:30001")
+
+ def test_run_with_expose_ports(self):
+ # create one off container
+ self.base_dir = 'tests/fixtures/expose-composefile'
+ self.dispatch(['run', '-d', '--service-ports', 'simple'])
+ container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
+
+ ports = container.ports
+ self.assertEqual(len(ports), 9)
+ # exposed ports are not mapped to host ports
+ assert ports['3000/tcp'] is None
+ assert ports['3001/tcp'] is None
+ assert ports['3001/udp'] is None
+ assert ports['3002/tcp'] is None
+ assert ports['3003/tcp'] is None
+ assert ports['3004/tcp'] is None
+ assert ports['3005/tcp'] is None
+ assert ports['3006/udp'] is None
+ assert ports['3007/udp'] is None
+
+ # close all one off containers we just created
+ container.stop()
+
+ def test_run_with_custom_name(self):
+ self.base_dir = 'tests/fixtures/environment-composefile'
+ name = 'the-container-name'
+ self.dispatch(['run', '--name', name, 'service', '/bin/true'])
+
+ service = self.project.get_service('service')
+ container, = service.containers(stopped=True, one_off=OneOffFilter.only)
+ self.assertEqual(container.name, name)
+
+ def test_run_service_with_workdir_overridden(self):
+ self.base_dir = 'tests/fixtures/run-workdir'
+ name = 'service'
+ workdir = '/var'
+ self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name])
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ self.assertEqual(workdir, container.get('Config.WorkingDir'))
+
+ def test_run_service_with_workdir_overridden_short_form(self):
+ self.base_dir = 'tests/fixtures/run-workdir'
+ name = 'service'
+ workdir = '/var'
+ self.dispatch(['run', '-w', workdir, name])
+ service = self.project.get_service(name)
+ container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ self.assertEqual(workdir, container.get('Config.WorkingDir'))
+
+ @v2_only()
+ def test_run_interactive_connects_to_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+
+ self.dispatch(['up', '-d'])
+ self.dispatch(['run', 'app', 'nslookup', 'app'])
+ self.dispatch(['run', 'app', 'nslookup', 'db'])
+
+ containers = self.project.get_service('app').containers(
+ stopped=True, one_off=OneOffFilter.only)
+ assert len(containers) == 2
+
+ for container in containers:
+ networks = container.get('NetworkSettings.Networks')
+
+ assert sorted(list(networks)) == [
+ '{}_{}'.format(self.project.name, name)
+ for name in ['back', 'front']
+ ]
+
+ for _, config in networks.items():
+ # TODO: once we drop support for API <1.24, this can be changed to:
+ # assert config['Aliases'] == [container.short_id]
+ aliases = set(config['Aliases'] or []) - set([container.short_id])
+ assert not aliases
+
+ @v2_only()
+ def test_run_detached_connects_to_network(self):
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['up', '-d'])
+ self.dispatch(['run', '-d', 'app', 'top'])
+
+ container = self.project.get_service('app').containers(one_off=OneOffFilter.only)[0]
+ networks = container.get('NetworkSettings.Networks')
+
+ assert sorted(list(networks)) == [
+ '{}_{}'.format(self.project.name, name)
+ for name in ['back', 'front']
+ ]
+
+ for _, config in networks.items():
+ # TODO: once we drop support for API <1.24, this can be changed to:
+ # assert config['Aliases'] == [container.short_id]
+ aliases = set(config['Aliases'] or []) - set([container.short_id])
+ assert not aliases
+
+ assert self.lookup(container, 'app')
+ assert self.lookup(container, 'db')
+
+ def test_run_handles_sigint(self):
+ proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'running'))
+
+ os.kill(proc.pid, signal.SIGINT)
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'exited'))
+
+ def test_run_handles_sigterm(self):
+ proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'running'))
+
+ os.kill(proc.pid, signal.SIGTERM)
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simplecomposefile_simple_run_1',
+ 'exited'))
+
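+ # mock.patch.dict with no new values snapshots os.environ and restores it
+ # when the test exits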
+ @mock.patch.dict(os.environ)
+ def test_run_unicode_env_values_from_system(self):
+ value = 'ą, ć, ę, ł, ń, ó, ś, ź, ż'
+ if six.PY2: # os.environ doesn't support unicode values in Py2
+ os.environ['BAR'] = value.encode('utf-8')
+ else: # ... and doesn't support byte values in Py3
+ os.environ['BAR'] = value
+ self.base_dir = 'tests/fixtures/unicode-environment'
+ result = self.dispatch(['run', 'simple'])
+
+ if six.PY2: # Can't retrieve output on Py3. See issue #3670
+ assert value == result.stdout.strip()
+
+ container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ environment = container.get('Config.Env')
+ assert 'FOO={}'.format(value) in environment
+
+ @mock.patch.dict(os.environ)
+ def test_run_env_values_from_system(self):
+ os.environ['FOO'] = 'bar'
+ os.environ['BAR'] = 'baz'
+
+ self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)
+
+ container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ environment = container.get('Config.Env')
+ assert 'FOO=bar' in environment
+ assert 'BAR=baz' not in environment
+
+ def test_rm(self):
+ service = self.project.get_service('simple')
+ service.create_container()
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.dispatch(['rm', '--force'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ service = self.project.get_service('simple')
+ service.create_container()
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.dispatch(['rm', '-f'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ service = self.project.get_service('simple')
+ service.create_container()
+ self.dispatch(['rm', '-fs'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+
+ def test_rm_stop(self):
+ self.dispatch(['up', '-d'], None)
+ simple = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ assert len(simple.containers()) == 1
+ assert len(another.containers()) == 1
+ self.dispatch(['rm', '-fs'], None)
+ assert len(simple.containers(stopped=True)) == 0
+ assert len(another.containers(stopped=True)) == 0
+
+ self.dispatch(['up', '-d'], None)
+ assert len(simple.containers()) == 1
+ assert len(another.containers()) == 1
+ self.dispatch(['rm', '-fs', 'another'], None)
+ assert len(simple.containers()) == 1
+ assert len(another.containers(stopped=True)) == 0
+
+ def test_rm_all(self):
+ service = self.project.get_service('simple')
+ service.create_container(one_off=False)
+ service.create_container(one_off=True)
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+ self.dispatch(['rm', '-f'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
+ service.create_container(one_off=False)
+ service.create_container(one_off=True)
+ kill_service(service)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+ self.dispatch(['rm', '-f', '--all'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
+ def test_stop(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['stop', '-t', '1'], None)
+
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+
+ def test_stop_signal(self):
+ self.base_dir = 'tests/fixtures/stop-signal-composefile'
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['stop', '-t', '1'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+ self.assertEqual(service.containers(stopped=True)[0].exit_code, 0)
+
+ def test_start_no_containers(self):
+ result = self.dispatch(['start'], returncode=1)
+ assert 'No containers to start' in result.stderr
+
+ @v2_only()
+ def test_up_logging(self):
+ self.base_dir = 'tests/fixtures/logging-composefile'
+ self.dispatch(['up', '-d'])
+ simple = self.project.get_service('simple').containers()[0]
+ log_config = simple.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'none')
+
+ another = self.project.get_service('another').containers()[0]
+ log_config = another.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'json-file')
+ self.assertEqual(log_config.get('Config')['max-size'], '10m')
+
+ def test_up_logging_legacy(self):
+ self.base_dir = 'tests/fixtures/logging-composefile-legacy'
+ self.dispatch(['up', '-d'])
+ simple = self.project.get_service('simple').containers()[0]
+ log_config = simple.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'none')
+
+ another = self.project.get_service('another').containers()[0]
+ log_config = another.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'json-file')
+ self.assertEqual(log_config.get('Config')['max-size'], '10m')
+
+ def test_pause_unpause(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertFalse(service.containers()[0].is_paused)
+
+ self.dispatch(['pause'], None)
+ self.assertTrue(service.containers()[0].is_paused)
+
+ self.dispatch(['unpause'], None)
+ self.assertFalse(service.containers()[0].is_paused)
+
+ def test_pause_no_containers(self):
+ result = self.dispatch(['pause'], returncode=1)
+ assert 'No containers to pause' in result.stderr
+
+ def test_unpause_no_containers(self):
+ result = self.dispatch(['unpause'], returncode=1)
+ assert 'No containers to unpause' in result.stderr
+
+ def test_logs_invalid_service_name(self):
+ self.dispatch(['logs', 'madeupname'], returncode=1)
+
+ def test_logs_follow(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ self.dispatch(['up', '-d'])
+
+ result = self.dispatch(['logs', '-f'])
+
+ if not is_cluster(self.client):
+ assert result.stdout.count('\n') == 5
+ else:
+ # Sometimes logs are picked up from old containers that haven't yet
+ # been removed (removal in Swarm is async)
+ assert result.stdout.count('\n') >= 5
+
+ assert 'simple' in result.stdout
+ assert 'another' in result.stdout
+ assert 'exited with code 0' in result.stdout
+
+ def test_logs_follow_logs_from_new_containers(self):
+ self.base_dir = 'tests/fixtures/logs-composefile'
+ self.dispatch(['up', '-d', 'simple'])
+
+ proc = start_process(self.base_dir, ['logs', '-f'])
+
+ self.dispatch(['up', '-d', 'another'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'logscomposefile_another_1',
+ 'exited'))
+
+ self.dispatch(['kill', 'simple'])
+
+ result = wait_on_process(proc)
+
+ assert 'hello' in result.stdout
+ assert 'test' in result.stdout
+ assert 'logscomposefile_another_1 exited with code 0' in result.stdout
+ assert 'logscomposefile_simple_1 exited with code 137' in result.stdout
+
+ def test_logs_default(self):
+ self.base_dir = 'tests/fixtures/logs-composefile'
+ self.dispatch(['up', '-d'])
+
+ result = self.dispatch(['logs'])
+ assert 'hello' in result.stdout
+ assert 'test' in result.stdout
+ assert 'exited with' not in result.stdout
+
+ def test_logs_on_stopped_containers_exits(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ self.dispatch(['up'])
+
+ result = self.dispatch(['logs'])
+ assert 'simple' in result.stdout
+ assert 'another' in result.stdout
+ assert 'exited with' not in result.stdout
+
+ def test_logs_timestamps(self):
+ self.base_dir = 'tests/fixtures/echo-services'
+ self.dispatch(['up', '-d'])
+
+ result = self.dispatch(['logs', '-f', '-t'])
+ self.assertRegexpMatches(result.stdout, r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})')
+
+ def test_logs_tail(self):
+ self.base_dir = 'tests/fixtures/logs-tail-composefile'
+ self.dispatch(['up'])
+
+ result = self.dispatch(['logs', '--tail', '2'])
+ assert 'c\n' in result.stdout
+ assert 'd\n' in result.stdout
+ assert 'a\n' not in result.stdout
+ assert 'b\n' not in result.stdout
+
+ def test_kill(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['kill'], None)
+
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+
+ def test_kill_signal_sigstop(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['kill', '-s', 'SIGSTOP'], None)
+
+ self.assertEqual(len(service.containers()), 1)
+ # The container is still running. It has only been paused
+ self.assertTrue(service.containers()[0].is_running)
+
+ def test_kill_stopped_service(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ self.dispatch(['kill', '-s', 'SIGSTOP'], None)
+ self.assertTrue(service.containers()[0].is_running)
+
+ self.dispatch(['kill', '-s', 'SIGKILL'], None)
+
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.assertFalse(service.containers(stopped=True)[0].is_running)
+
+ def test_restart(self):
+ service = self.project.get_service('simple')
+ container = service.create_container()
+ service.start_container(container)
+ started_at = container.dictionary['State']['StartedAt']
+ self.dispatch(['restart', '-t', '1'], None)
+ container.inspect()
+ self.assertNotEqual(
+ container.dictionary['State']['FinishedAt'],
+ '0001-01-01T00:00:00Z',
+ )
+ self.assertNotEqual(
+ container.dictionary['State']['StartedAt'],
+ started_at,
+ )
+
+ def test_restart_stopped_container(self):
+ service = self.project.get_service('simple')
+ container = service.create_container()
+ container.start()
+ container.kill()
+ self.assertEqual(len(service.containers(stopped=True)), 1)
+ self.dispatch(['restart', '-t', '1'], None)
+ self.assertEqual(len(service.containers(stopped=False)), 1)
+
+ def test_restart_no_containers(self):
+ result = self.dispatch(['restart'], returncode=1)
+ assert 'No containers to restart' in result.stderr
+
+ def test_scale(self):
+ project = self.project
+
+ self.dispatch(['scale', 'simple=1'])
+ self.assertEqual(len(project.get_service('simple').containers()), 1)
+
+ self.dispatch(['scale', 'simple=3', 'another=2'])
+ self.assertEqual(len(project.get_service('simple').containers()), 3)
+ self.assertEqual(len(project.get_service('another').containers()), 2)
+
+ self.dispatch(['scale', 'simple=1', 'another=1'])
+ self.assertEqual(len(project.get_service('simple').containers()), 1)
+ self.assertEqual(len(project.get_service('another').containers()), 1)
+
+ self.dispatch(['scale', 'simple=1', 'another=1'])
+ self.assertEqual(len(project.get_service('simple').containers()), 1)
+ self.assertEqual(len(project.get_service('another').containers()), 1)
+
+ self.dispatch(['scale', 'simple=0', 'another=0'])
+ self.assertEqual(len(project.get_service('simple').containers()), 0)
+ self.assertEqual(len(project.get_service('another').containers()), 0)
+
+ def test_scale_v2_2(self):
+ self.base_dir = 'tests/fixtures/scale'
+ result = self.dispatch(['scale', 'web=1'], returncode=1)
+ assert 'incompatible with the v2.2 format' in result.stderr
+
+ def test_up_scale_scale_up(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=3'])
+ assert len(project.get_service('web').containers()) == 3
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_scale_down(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=1'])
+ assert len(project.get_service('web').containers()) == 1
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_reset(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
+ assert len(project.get_service('web').containers()) == 3
+ assert len(project.get_service('db').containers()) == 3
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_to_zero(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
+ assert len(project.get_service('web').containers()) == 0
+ assert len(project.get_service('db').containers()) == 0
+
+ def test_port(self):
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['up', '-d'], None)
+ container = self.project.get_service('simple').get_container()
+
+ def get_port(number):
+ result = self.dispatch(['port', 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ assert get_port(3000) == container.get_local_port(3000)
+ assert ':49152' in get_port(3001)
+ assert ':49153' in get_port(3002)
+
+ def test_expanded_port(self):
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['-f', 'expanded-notation.yml', 'up', '-d'])
+ container = self.project.get_service('simple').get_container()
+
+ def get_port(number):
+ result = self.dispatch(['port', 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ assert get_port(3000) == container.get_local_port(3000)
+ assert ':53222' in get_port(3001)
+ assert ':53223' in get_port(3002)
+
+ def test_port_with_scale(self):
+ self.base_dir = 'tests/fixtures/ports-composefile-scale'
+ self.dispatch(['scale', 'simple=2'], None)
+ containers = sorted(
+ self.project.containers(service_names=['simple']),
+ key=attrgetter('name'))
+
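+ # 'port --index=N' selects the Nth container of a scaled service (1-based)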
+ def get_port(number, index=None):
+ if index is None:
+ result = self.dispatch(['port', 'simple', str(number)])
+ else:
+ result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
+ self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
+ self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
+ self.assertEqual(get_port(3002), "")
+
+ def test_events_json(self):
+ events_proc = start_process(self.base_dir, ['events', '--json'])
+ self.dispatch(['up', '-d'])
+ wait_on_condition(ContainerCountCondition(self.project, 2))
+
+ os.kill(events_proc.pid, signal.SIGINT)
+ result = wait_on_process(events_proc, returncode=1)
+ lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')]
+ assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2}
+
+ def test_events_human_readable(self):
+
+ def has_timestamp(string):
+ str_iso_date, str_iso_time, container_info = string.split(' ', 2)
+ try:
+ return isinstance(datetime.datetime.strptime(
+ '%s %s' % (str_iso_date, str_iso_time),
+ '%Y-%m-%d %H:%M:%S.%f'),
+ datetime.datetime)
+ except ValueError:
+ return False
+
+ events_proc = start_process(self.base_dir, ['events'])
+ self.dispatch(['up', '-d', 'simple'])
+ wait_on_condition(ContainerCountCondition(self.project, 1))
+
+ os.kill(events_proc.pid, signal.SIGINT)
+ result = wait_on_process(events_proc, returncode=1)
+ lines = result.stdout.rstrip().split('\n')
+ assert len(lines) == 2
+
+ container, = self.project.containers()
+ expected_template = ' container {} {}'
+ expected_meta_info = ['image=busybox:latest', 'name=simplecomposefile_simple_1']
+
+ assert expected_template.format('create', container.id) in lines[0]
+ assert expected_template.format('start', container.id) in lines[1]
+ for line in lines:
+ for info in expected_meta_info:
+ assert info in line
+
+ assert has_timestamp(lines[0])
+
+ def test_env_file_relative_to_compose_file(self):
+ config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
+ self.dispatch(['-f', config_path, 'up', '-d'], None)
+ self._project = get_project(self.base_dir, [config_path])
+
+ containers = self.project.containers(stopped=True)
+ self.assertEqual(len(containers), 1)
+ self.assertIn("FOO=1", containers[0].get('Config.Env'))
+
+ @mock.patch.dict(os.environ)
+ def test_home_and_env_var_in_volume_path(self):
+ os.environ['VOLUME_NAME'] = 'my-volume'
+ os.environ['HOME'] = '/tmp/home-dir'
+
+ self.base_dir = 'tests/fixtures/volume-path-interpolation'
+ self.dispatch(['up', '-d'], None)
+
+ container = self.project.containers(stopped=True)[0]
+ actual_host_path = container.get_mount('/container-path')['Source']
+ components = actual_host_path.split('/')
+ assert components[-2:] == ['home-dir', 'my-volume']
+
+ def test_up_with_default_override_file(self):
+ self.base_dir = 'tests/fixtures/override-files'
+ self.dispatch(['up', '-d'], None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 2)
+
+ web, db = containers
+ self.assertEqual(web.human_readable_command, 'top')
+ self.assertEqual(db.human_readable_command, 'top')
+
+ def test_up_with_multiple_files(self):
+ self.base_dir = 'tests/fixtures/override-files'
+ config_paths = [
+ 'docker-compose.yml',
+ 'docker-compose.override.yml',
+ 'extra.yml',
+ ]
+ self._project = get_project(self.base_dir, config_paths)
+ self.dispatch(
+ [
+ '-f', config_paths[0],
+ '-f', config_paths[1],
+ '-f', config_paths[2],
+ 'up', '-d',
+ ],
+ None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 3)
+
+ web, other, db = containers
+ self.assertEqual(web.human_readable_command, 'top')
+ self.assertEqual(db.human_readable_command, 'top')
+ self.assertEqual(other.human_readable_command, 'top')
+
+ def test_up_with_extends(self):
+ self.base_dir = 'tests/fixtures/extends'
+ self.dispatch(['up', '-d'], None)
+
+ self.assertEqual(
+ set([s.name for s in self.project.services]),
+ set(['mydb', 'myweb']),
+ )
+
+ # Sort by name so we get [db, web]
+ containers = sorted(
+ self.project.containers(stopped=True),
+ key=lambda c: c.name,
+ )
+
+ self.assertEqual(len(containers), 2)
+ web = containers[1]
+
+ self.assertEqual(
+ set(get_links(web)),
+ set(['db', 'mydb_1', 'extends_mydb_1']))
+
+ expected_env = set([
+ "FOO=1",
+ "BAR=2",
+ "BAZ=2",
+ ])
+ self.assertTrue(expected_env <= set(web.get('Config.Env')))
+
+ def test_top_services_not_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ result = self.dispatch(['top'])
+ assert len(result.stdout) == 0
+
+ def test_top_services_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['top'])
+
+ self.assertIn('top_service_a', result.stdout)
+ self.assertIn('top_service_b', result.stdout)
+ self.assertNotIn('top_not_a_service', result.stdout)
+
+ def test_top_processes_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['top'])
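+ # "top" appears twice per service: in the container name
+ # (top_service_x_1) and as the running process, so 2 services -> 4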
+ assert result.stdout.count("top") == 4
+
+ def test_forward_exitval(self):
+ self.base_dir = 'tests/fixtures/exit-code-from'
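+ # --exit-code-from propagates the exit status of the `another`
+ # service, so `up` itself should return 1 here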
+ proc = start_process(
+ self.base_dir,
+ ['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
+
+ result = wait_on_process(proc, returncode=1)
+
+ assert 'exitcodefrom_another_1 exited with code 1' in result.stdout
+
+ def test_images(self):
+ self.project.get_service('simple').create_container()
+ result = self.dispatch(['images'])
+ assert 'busybox' in result.stdout
+ assert 'simplecomposefile_simple_1' in result.stdout
+
+ def test_images_default_composefile(self):
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['images'])
+
+ assert 'busybox' in result.stdout
+ assert 'multiplecomposefiles_another_1' in result.stdout
+ assert 'multiplecomposefiles_simple_1' in result.stdout
+
+ def test_up_with_override_yaml(self):
+ self.base_dir = 'tests/fixtures/override-yaml-files'
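+ # an override file with the alternate .yaml extension must be picked
+ # up automatically alongside docker-compose.yml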
+ self._project = get_project(self.base_dir, [])
+ self.dispatch(
+ [
+ 'up', '-d',
+ ],
+ None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 2)
+
+ web, db = containers
+ self.assertEqual(web.human_readable_command, 'sleep 100')
+ self.assertEqual(db.human_readable_command, 'top')
+
+ def test_up_with_duplicate_override_yaml_files(self):
+ self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
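+ # the fixture contains both docker-compose.override.yml and .yaml,
+ # which is ambiguous and must raise DuplicateOverrideFileFound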
+ with self.assertRaises(DuplicateOverrideFileFound):
+ get_project(self.base_dir, [])
+ self.base_dir = None
diff --git a/tests/fixtures/UpperCaseDir/docker-compose.yml b/tests/fixtures/UpperCaseDir/docker-compose.yml
new file mode 100644
index 00000000..b25beaf4
--- /dev/null
+++ b/tests/fixtures/UpperCaseDir/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/abort-on-container-exit-0/docker-compose.yml b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
new file mode 100644
index 00000000..ce41697b
--- /dev/null
+++ b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: ls .
diff --git a/tests/fixtures/abort-on-container-exit-1/docker-compose.yml b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
new file mode 100644
index 00000000..7ec9b7e1
--- /dev/null
+++ b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: ls /thecakeisalie
diff --git a/tests/fixtures/build-ctx/Dockerfile b/tests/fixtures/build-ctx/Dockerfile
new file mode 100644
index 00000000..dd864b83
--- /dev/null
+++ b/tests/fixtures/build-ctx/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+CMD echo "success"
diff --git a/tests/fixtures/build-path-override-dir/docker-compose.yml b/tests/fixtures/build-path-override-dir/docker-compose.yml
new file mode 100644
index 00000000..15dbb3e6
--- /dev/null
+++ b/tests/fixtures/build-path-override-dir/docker-compose.yml
@@ -0,0 +1,2 @@
+foo:
+ build: ./build-ctx/
diff --git a/tests/fixtures/build-path/docker-compose.yml b/tests/fixtures/build-path/docker-compose.yml
new file mode 100644
index 00000000..66e8916e
--- /dev/null
+++ b/tests/fixtures/build-path/docker-compose.yml
@@ -0,0 +1,2 @@
+foo:
+ build: ../build-ctx/
diff --git a/tests/fixtures/build-shm-size/Dockerfile b/tests/fixtures/build-shm-size/Dockerfile
new file mode 100644
index 00000000..f91733d6
--- /dev/null
+++ b/tests/fixtures/build-shm-size/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+
+# Report the shm_size (through the size of /dev/shm)
+RUN echo "shm_size:" $(df -h /dev/shm | tail -n 1 | awk '{print $2}')
diff --git a/tests/fixtures/build-shm-size/docker-compose.yml b/tests/fixtures/build-shm-size/docker-compose.yml
new file mode 100644
index 00000000..238a5132
--- /dev/null
+++ b/tests/fixtures/build-shm-size/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '3.5'
+
+services:
+ custom_shm_size:
+ build:
+ context: .
+ shm_size: 100663296 # =96M
diff --git a/tests/fixtures/bundle-with-digests/docker-compose.yml b/tests/fixtures/bundle-with-digests/docker-compose.yml
new file mode 100644
index 00000000..b7013512
--- /dev/null
+++ b/tests/fixtures/bundle-with-digests/docker-compose.yml
@@ -0,0 +1,9 @@
+
+version: '2.0'
+
+services:
+ web:
+ image: dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d
+
+ redis:
+ image: redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d374b2b7392de1e7d77be26ef8f7b
diff --git a/tests/fixtures/commands-composefile/docker-compose.yml b/tests/fixtures/commands-composefile/docker-compose.yml
new file mode 100644
index 00000000..87602bd6
--- /dev/null
+++ b/tests/fixtures/commands-composefile/docker-compose.yml
@@ -0,0 +1,5 @@
+implicit:
+ image: composetest_test
+explicit:
+ image: composetest_test
+ command: [ "/bin/true" ]
diff --git a/tests/fixtures/default-env-file/.env b/tests/fixtures/default-env-file/.env
new file mode 100644
index 00000000..9056de72
--- /dev/null
+++ b/tests/fixtures/default-env-file/.env
@@ -0,0 +1,4 @@
+IMAGE=alpine:latest
+COMMAND=true
+PORT1=5643
+PORT2=9999
diff --git a/tests/fixtures/default-env-file/docker-compose.yml b/tests/fixtures/default-env-file/docker-compose.yml
new file mode 100644
index 00000000..aa8e4409
--- /dev/null
+++ b/tests/fixtures/default-env-file/docker-compose.yml
@@ -0,0 +1,6 @@
+web:
+ image: ${IMAGE}
+ command: ${COMMAND}
+ ports:
+ - $PORT1
+ - $PORT2
diff --git a/tests/fixtures/dockerfile-with-volume/Dockerfile b/tests/fixtures/dockerfile-with-volume/Dockerfile
new file mode 100644
index 00000000..0d376ec4
--- /dev/null
+++ b/tests/fixtures/dockerfile-with-volume/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+VOLUME /data
+CMD top
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml
new file mode 100644
index 00000000..58c67348
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml
@@ -0,0 +1,3 @@
+
+db:
+ command: "top"
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml
new file mode 100644
index 00000000..f1b8ef18
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml
@@ -0,0 +1,3 @@
+
+db:
+ command: "sleep 300"
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
new file mode 100644
index 00000000..5f2909d6
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
@@ -0,0 +1,10 @@
+
+web:
+ image: busybox:latest
+ command: "sleep 100"
+ links:
+ - db
+
+db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/echo-services/docker-compose.yml b/tests/fixtures/echo-services/docker-compose.yml
new file mode 100644
index 00000000..8014f3d9
--- /dev/null
+++ b/tests/fixtures/echo-services/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: echo simple
+another:
+ image: busybox:latest
+ command: echo another
diff --git a/tests/fixtures/entrypoint-composefile/docker-compose.yml b/tests/fixtures/entrypoint-composefile/docker-compose.yml
new file mode 100644
index 00000000..e9880973
--- /dev/null
+++ b/tests/fixtures/entrypoint-composefile/docker-compose.yml
@@ -0,0 +1,6 @@
+version: "2"
+services:
+ test:
+ image: busybox
+ entrypoint: printf
+ command: default args
diff --git a/tests/fixtures/entrypoint-dockerfile/Dockerfile b/tests/fixtures/entrypoint-dockerfile/Dockerfile
new file mode 100644
index 00000000..49f4416c
--- /dev/null
+++ b/tests/fixtures/entrypoint-dockerfile/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+ENTRYPOINT ["printf"]
+CMD ["default", "args"]
diff --git a/tests/fixtures/entrypoint-dockerfile/docker-compose.yml b/tests/fixtures/entrypoint-dockerfile/docker-compose.yml
new file mode 100644
index 00000000..8318e61f
--- /dev/null
+++ b/tests/fixtures/entrypoint-dockerfile/docker-compose.yml
@@ -0,0 +1,4 @@
+version: "2"
+services:
+ test:
+ build: .
diff --git a/tests/fixtures/env-file/docker-compose.yml b/tests/fixtures/env-file/docker-compose.yml
new file mode 100644
index 00000000..d9366ace
--- /dev/null
+++ b/tests/fixtures/env-file/docker-compose.yml
@@ -0,0 +1,4 @@
+web:
+ image: busybox
+ command: /bin/true
+ env_file: ./test.env
diff --git a/tests/fixtures/env-file/test.env b/tests/fixtures/env-file/test.env
new file mode 100644
index 00000000..d99cd41a
--- /dev/null
+++ b/tests/fixtures/env-file/test.env
@@ -0,0 +1 @@
+FOO=1
diff --git a/tests/fixtures/env/one.env b/tests/fixtures/env/one.env
new file mode 100644
index 00000000..45b59fe6
--- /dev/null
+++ b/tests/fixtures/env/one.env
@@ -0,0 +1,11 @@
+# Keep the blank lines and comments in this file, please
+
+ONE=2
+TWO=1
+
+ # (thanks)
+
+THREE=3
+
+FOO=bar
+# FOO=somethingelse
diff --git a/tests/fixtures/env/resolve.env b/tests/fixtures/env/resolve.env
new file mode 100644
index 00000000..b4f76b29
--- /dev/null
+++ b/tests/fixtures/env/resolve.env
@@ -0,0 +1,4 @@
+FILE_DEF=bär
+FILE_DEF_EMPTY=
+ENV_DEF
+NO_DEF
diff --git a/tests/fixtures/env/two.env b/tests/fixtures/env/two.env
new file mode 100644
index 00000000..3b21871a
--- /dev/null
+++ b/tests/fixtures/env/two.env
@@ -0,0 +1,2 @@
+FOO=baz
+DOO=dah
diff --git a/tests/fixtures/environment-composefile/docker-compose.yml b/tests/fixtures/environment-composefile/docker-compose.yml
new file mode 100644
index 00000000..9d99fee0
--- /dev/null
+++ b/tests/fixtures/environment-composefile/docker-compose.yml
@@ -0,0 +1,7 @@
+service:
+ image: busybox:latest
+ command: top
+
+ environment:
+ foo: bar
+ hello: world
diff --git a/tests/fixtures/environment-interpolation/docker-compose.yml b/tests/fixtures/environment-interpolation/docker-compose.yml
new file mode 100644
index 00000000..7ed43a81
--- /dev/null
+++ b/tests/fixtures/environment-interpolation/docker-compose.yml
@@ -0,0 +1,17 @@
+web:
+ # unbracketed name
+ image: $IMAGE
+
+ # array element
+ ports:
+ - "${HOST_PORT}:8000"
+
+ # dictionary item value
+ labels:
+ mylabel: "${LABEL_VALUE}"
+
+ # unset value
+ hostname: "host-${UNSET_VALUE}"
+
+ # escaped interpolation
+ command: "$${ESCAPED}"
diff --git a/tests/fixtures/exit-code-from/docker-compose.yml b/tests/fixtures/exit-code-from/docker-compose.yml
new file mode 100644
index 00000000..687e78b9
--- /dev/null
+++ b/tests/fixtures/exit-code-from/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo hello && tail -f /dev/null"
+another:
+ image: busybox:latest
+ command: /bin/false
diff --git a/tests/fixtures/expose-composefile/docker-compose.yml b/tests/fixtures/expose-composefile/docker-compose.yml
new file mode 100644
index 00000000..d14a468d
--- /dev/null
+++ b/tests/fixtures/expose-composefile/docker-compose.yml
@@ -0,0 +1,11 @@
+
+simple:
+ image: busybox:latest
+ command: top
+ expose:
+ - '3000'
+ - '3001/tcp'
+ - '3001/udp'
+ - '3002-3003'
+ - '3004-3005/tcp'
+ - '3006-3007/udp'
diff --git a/tests/fixtures/extends/circle-1.yml b/tests/fixtures/extends/circle-1.yml
new file mode 100644
index 00000000..d88ea61d
--- /dev/null
+++ b/tests/fixtures/extends/circle-1.yml
@@ -0,0 +1,12 @@
+foo:
+ image: busybox
+bar:
+ image: busybox
+web:
+ extends:
+ file: circle-2.yml
+ service: other
+baz:
+ image: busybox
+quux:
+ image: busybox
diff --git a/tests/fixtures/extends/circle-2.yml b/tests/fixtures/extends/circle-2.yml
new file mode 100644
index 00000000..de05bc8d
--- /dev/null
+++ b/tests/fixtures/extends/circle-2.yml
@@ -0,0 +1,12 @@
+foo:
+ image: busybox
+bar:
+ image: busybox
+other:
+ extends:
+ file: circle-1.yml
+ service: web
+baz:
+ image: busybox
+quux:
+ image: busybox
diff --git a/tests/fixtures/extends/common-env-labels-ulimits.yml b/tests/fixtures/extends/common-env-labels-ulimits.yml
new file mode 100644
index 00000000..09efb4e7
--- /dev/null
+++ b/tests/fixtures/extends/common-env-labels-ulimits.yml
@@ -0,0 +1,13 @@
+web:
+ extends:
+ file: common.yml
+ service: web
+ environment:
+ - FOO=2
+ - BAZ=3
+ labels: ['label=one']
+ ulimits:
+ nproc: 65535
+ memlock:
+ soft: 1024
+ hard: 2048
diff --git a/tests/fixtures/extends/common.yml b/tests/fixtures/extends/common.yml
new file mode 100644
index 00000000..b2d86aa4
--- /dev/null
+++ b/tests/fixtures/extends/common.yml
@@ -0,0 +1,7 @@
+web:
+ image: busybox
+ command: /bin/true
+ net: host
+ environment:
+ - FOO=1
+ - BAR=1
diff --git a/tests/fixtures/extends/docker-compose.yml b/tests/fixtures/extends/docker-compose.yml
new file mode 100644
index 00000000..8e37d404
--- /dev/null
+++ b/tests/fixtures/extends/docker-compose.yml
@@ -0,0 +1,17 @@
+myweb:
+ extends:
+ file: common.yml
+ service: web
+ command: top
+ links:
+ - "mydb:db"
+ environment:
+ # leave FOO alone
+ # override BAR
+ BAR: "2"
+ # add BAZ
+ BAZ: "2"
+ net: bridge
+mydb:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/extends/healthcheck-1.yml b/tests/fixtures/extends/healthcheck-1.yml
new file mode 100644
index 00000000..4c311e62
--- /dev/null
+++ b/tests/fixtures/extends/healthcheck-1.yml
@@ -0,0 +1,9 @@
+version: '2.1'
+services:
+ demo:
+ image: foobar:latest
+ healthcheck:
+ test: ["CMD", "/health.sh"]
+ interval: 10s
+ timeout: 5s
+ retries: 36
diff --git a/tests/fixtures/extends/healthcheck-2.yml b/tests/fixtures/extends/healthcheck-2.yml
new file mode 100644
index 00000000..11bc9f09
--- /dev/null
+++ b/tests/fixtures/extends/healthcheck-2.yml
@@ -0,0 +1,6 @@
+version: '2.1'
+services:
+ demo:
+ extends:
+ file: healthcheck-1.yml
+ service: demo
diff --git a/tests/fixtures/extends/invalid-links.yml b/tests/fixtures/extends/invalid-links.yml
new file mode 100644
index 00000000..cea740cb
--- /dev/null
+++ b/tests/fixtures/extends/invalid-links.yml
@@ -0,0 +1,11 @@
+mydb:
+ build: '.'
+myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+web:
+ build: '.'
+ links:
+ - "mydb:db"
diff --git a/tests/fixtures/extends/invalid-net-v2.yml b/tests/fixtures/extends/invalid-net-v2.yml
new file mode 100644
index 00000000..7ba714e8
--- /dev/null
+++ b/tests/fixtures/extends/invalid-net-v2.yml
@@ -0,0 +1,12 @@
+version: "2"
+services:
+ myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+ web:
+ build: '.'
+ network_mode: "service:net"
+ net:
+ build: '.'
diff --git a/tests/fixtures/extends/invalid-net.yml b/tests/fixtures/extends/invalid-net.yml
new file mode 100644
index 00000000..fbcd020b
--- /dev/null
+++ b/tests/fixtures/extends/invalid-net.yml
@@ -0,0 +1,8 @@
+myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+web:
+ build: '.'
+ net: "container:db"
diff --git a/tests/fixtures/extends/invalid-volumes.yml b/tests/fixtures/extends/invalid-volumes.yml
new file mode 100644
index 00000000..3db0118e
--- /dev/null
+++ b/tests/fixtures/extends/invalid-volumes.yml
@@ -0,0 +1,9 @@
+myweb:
+ build: '.'
+ extends:
+ service: web
+ command: top
+web:
+ build: '.'
+ volumes_from:
+ - "db"
diff --git a/tests/fixtures/extends/nested-intermediate.yml b/tests/fixtures/extends/nested-intermediate.yml
new file mode 100644
index 00000000..c2dd8c94
--- /dev/null
+++ b/tests/fixtures/extends/nested-intermediate.yml
@@ -0,0 +1,6 @@
+webintermediate:
+ extends:
+ file: common.yml
+ service: web
+ environment:
+ - "FOO=2"
diff --git a/tests/fixtures/extends/nested.yml b/tests/fixtures/extends/nested.yml
new file mode 100644
index 00000000..6025e6d5
--- /dev/null
+++ b/tests/fixtures/extends/nested.yml
@@ -0,0 +1,6 @@
+myweb:
+ extends:
+ file: nested-intermediate.yml
+ service: webintermediate
+ environment:
+ - "BAR=2"
diff --git a/tests/fixtures/extends/no-file-specified.yml b/tests/fixtures/extends/no-file-specified.yml
new file mode 100644
index 00000000..40e43c4b
--- /dev/null
+++ b/tests/fixtures/extends/no-file-specified.yml
@@ -0,0 +1,9 @@
+myweb:
+ extends:
+ service: web
+ environment:
+ - "BAR=1"
+web:
+ image: busybox
+ environment:
+ - "BAZ=3"
diff --git a/tests/fixtures/extends/nonexistent-path-base.yml b/tests/fixtures/extends/nonexistent-path-base.yml
new file mode 100644
index 00000000..4e6c82b0
--- /dev/null
+++ b/tests/fixtures/extends/nonexistent-path-base.yml
@@ -0,0 +1,6 @@
+dnebase:
+ build: nonexistent.path
+ command: /bin/true
+ environment:
+ - FOO=1
+ - BAR=1
diff --git a/tests/fixtures/extends/nonexistent-path-child.yml b/tests/fixtures/extends/nonexistent-path-child.yml
new file mode 100644
index 00000000..d3b732f2
--- /dev/null
+++ b/tests/fixtures/extends/nonexistent-path-child.yml
@@ -0,0 +1,8 @@
+dnechild:
+ extends:
+ file: nonexistent-path-base.yml
+ service: dnebase
+ image: busybox
+ command: /bin/true
+ environment:
+ - BAR=2
diff --git a/tests/fixtures/extends/nonexistent-service.yml b/tests/fixtures/extends/nonexistent-service.yml
new file mode 100644
index 00000000..e9e17f1b
--- /dev/null
+++ b/tests/fixtures/extends/nonexistent-service.yml
@@ -0,0 +1,4 @@
+web:
+ image: busybox
+ extends:
+ service: foo
diff --git a/tests/fixtures/extends/service-with-invalid-schema.yml b/tests/fixtures/extends/service-with-invalid-schema.yml
new file mode 100644
index 00000000..00c36647
--- /dev/null
+++ b/tests/fixtures/extends/service-with-invalid-schema.yml
@@ -0,0 +1,4 @@
+myweb:
+ extends:
+ file: valid-composite-extends.yml
+ service: web
diff --git a/tests/fixtures/extends/service-with-valid-composite-extends.yml b/tests/fixtures/extends/service-with-valid-composite-extends.yml
new file mode 100644
index 00000000..6c419ed0
--- /dev/null
+++ b/tests/fixtures/extends/service-with-valid-composite-extends.yml
@@ -0,0 +1,5 @@
+myweb:
+ build: '.'
+ extends:
+ file: 'valid-composite-extends.yml'
+ service: web
diff --git a/tests/fixtures/extends/specify-file-as-self.yml b/tests/fixtures/extends/specify-file-as-self.yml
new file mode 100644
index 00000000..c24f10bc
--- /dev/null
+++ b/tests/fixtures/extends/specify-file-as-self.yml
@@ -0,0 +1,17 @@
+myweb:
+ extends:
+ file: specify-file-as-self.yml
+ service: web
+ environment:
+ - "BAR=1"
+web:
+ extends:
+ file: specify-file-as-self.yml
+ service: otherweb
+ image: busybox
+ environment:
+ - "BAZ=3"
+otherweb:
+ image: busybox
+ environment:
+ - "YEP=1"
diff --git a/tests/fixtures/extends/valid-common-config.yml b/tests/fixtures/extends/valid-common-config.yml
new file mode 100644
index 00000000..d8f13e7a
--- /dev/null
+++ b/tests/fixtures/extends/valid-common-config.yml
@@ -0,0 +1,6 @@
+myweb:
+ build: '.'
+ extends:
+ file: valid-common.yml
+ service: common-config
+ command: top
diff --git a/tests/fixtures/extends/valid-common.yml b/tests/fixtures/extends/valid-common.yml
new file mode 100644
index 00000000..07ad68e3
--- /dev/null
+++ b/tests/fixtures/extends/valid-common.yml
@@ -0,0 +1,3 @@
+common-config:
+ environment:
+ - FOO=1
diff --git a/tests/fixtures/extends/valid-composite-extends.yml b/tests/fixtures/extends/valid-composite-extends.yml
new file mode 100644
index 00000000..8816c3f3
--- /dev/null
+++ b/tests/fixtures/extends/valid-composite-extends.yml
@@ -0,0 +1,2 @@
+web:
+ command: top
diff --git a/tests/fixtures/extends/valid-interpolation-2.yml b/tests/fixtures/extends/valid-interpolation-2.yml
new file mode 100644
index 00000000..cb7bd93f
--- /dev/null
+++ b/tests/fixtures/extends/valid-interpolation-2.yml
@@ -0,0 +1,3 @@
+web:
+ build: '.'
+ hostname: "host-${HOSTNAME_VALUE}"
diff --git a/tests/fixtures/extends/valid-interpolation.yml b/tests/fixtures/extends/valid-interpolation.yml
new file mode 100644
index 00000000..68e8740f
--- /dev/null
+++ b/tests/fixtures/extends/valid-interpolation.yml
@@ -0,0 +1,5 @@
+myweb:
+ extends:
+ service: web
+ file: valid-interpolation-2.yml
+ command: top
diff --git a/tests/fixtures/extends/verbose-and-shorthand.yml b/tests/fixtures/extends/verbose-and-shorthand.yml
new file mode 100644
index 00000000..d3816302
--- /dev/null
+++ b/tests/fixtures/extends/verbose-and-shorthand.yml
@@ -0,0 +1,15 @@
+base:
+ image: busybox
+ environment:
+ - "BAR=1"
+
+verbose:
+ extends:
+ service: base
+ environment:
+ - "FOO=1"
+
+shorthand:
+ extends: base
+ environment:
+ - "FOO=2"
diff --git a/tests/fixtures/healthcheck/docker-compose.yml b/tests/fixtures/healthcheck/docker-compose.yml
new file mode 100644
index 00000000..2c45b8d8
--- /dev/null
+++ b/tests/fixtures/healthcheck/docker-compose.yml
@@ -0,0 +1,24 @@
+version: "3"
+services:
+ passes:
+ image: busybox
+ command: top
+ healthcheck:
+ test: "/bin/true"
+ interval: 1s
+ timeout: 30m
+ retries: 1
+
+ fails:
+ image: busybox
+ command: top
+ healthcheck:
+ test: ["CMD", "/bin/false"]
+ interval: 2.5s
+ retries: 2
+
+ disabled:
+ image: busybox
+ command: top
+ healthcheck:
+ disable: true
diff --git a/tests/fixtures/invalid-composefile/invalid.yml b/tests/fixtures/invalid-composefile/invalid.yml
new file mode 100644
index 00000000..0e74be44
--- /dev/null
+++ b/tests/fixtures/invalid-composefile/invalid.yml
@@ -0,0 +1,5 @@
+
+notaservice: oops
+
+web:
+ image: 'alpine:edge'
diff --git a/tests/fixtures/links-composefile/docker-compose.yml b/tests/fixtures/links-composefile/docker-compose.yml
new file mode 100644
index 00000000..930fd4c7
--- /dev/null
+++ b/tests/fixtures/links-composefile/docker-compose.yml
@@ -0,0 +1,11 @@
+db:
+ image: busybox:latest
+ command: top
+web:
+ image: busybox:latest
+ command: top
+ links:
+ - db:db
+console:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/logging-composefile-legacy/docker-compose.yml b/tests/fixtures/logging-composefile-legacy/docker-compose.yml
new file mode 100644
index 00000000..ee994107
--- /dev/null
+++ b/tests/fixtures/logging-composefile-legacy/docker-compose.yml
@@ -0,0 +1,10 @@
+simple:
+ image: busybox:latest
+ command: top
+ log_driver: "none"
+another:
+ image: busybox:latest
+ command: top
+ log_driver: "json-file"
+ log_opt:
+ max-size: "10m"
diff --git a/tests/fixtures/logging-composefile/docker-compose.yml b/tests/fixtures/logging-composefile/docker-compose.yml
new file mode 100644
index 00000000..466d13e5
--- /dev/null
+++ b/tests/fixtures/logging-composefile/docker-compose.yml
@@ -0,0 +1,14 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ logging:
+ driver: "none"
+ another:
+ image: busybox:latest
+ command: top
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
diff --git a/tests/fixtures/logs-composefile/docker-compose.yml b/tests/fixtures/logs-composefile/docker-compose.yml
new file mode 100644
index 00000000..b719c91e
--- /dev/null
+++ b/tests/fixtures/logs-composefile/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo hello && tail -f /dev/null"
+another:
+ image: busybox:latest
+ command: sh -c "echo test"
diff --git a/tests/fixtures/logs-tail-composefile/docker-compose.yml b/tests/fixtures/logs-tail-composefile/docker-compose.yml
new file mode 100644
index 00000000..80d8feae
--- /dev/null
+++ b/tests/fixtures/logs-tail-composefile/docker-compose.yml
@@ -0,0 +1,3 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo a && echo b && echo c && echo d"
diff --git a/tests/fixtures/longer-filename-composefile/docker-compose.yaml b/tests/fixtures/longer-filename-composefile/docker-compose.yaml
new file mode 100644
index 00000000..a4eba2d0
--- /dev/null
+++ b/tests/fixtures/longer-filename-composefile/docker-compose.yaml
@@ -0,0 +1,3 @@
+definedinyamlnotyml:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/multiple-composefiles/compose2.yml b/tests/fixtures/multiple-composefiles/compose2.yml
new file mode 100644
index 00000000..56803380
--- /dev/null
+++ b/tests/fixtures/multiple-composefiles/compose2.yml
@@ -0,0 +1,3 @@
+yetanother:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/multiple-composefiles/docker-compose.yml b/tests/fixtures/multiple-composefiles/docker-compose.yml
new file mode 100644
index 00000000..b25beaf4
--- /dev/null
+++ b/tests/fixtures/multiple-composefiles/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/net-container/docker-compose.yml b/tests/fixtures/net-container/docker-compose.yml
new file mode 100644
index 00000000..b5506e0e
--- /dev/null
+++ b/tests/fixtures/net-container/docker-compose.yml
@@ -0,0 +1,7 @@
+foo:
+ image: busybox
+ command: top
+ net: "container:bar"
+bar:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/net-container/v2-invalid.yml b/tests/fixtures/net-container/v2-invalid.yml
new file mode 100644
index 00000000..9b846295
--- /dev/null
+++ b/tests/fixtures/net-container/v2-invalid.yml
@@ -0,0 +1,10 @@
+version: "2"
+
+services:
+ foo:
+ image: busybox
+ command: top
+ bar:
+ image: busybox
+ command: top
+ net: "container:foo"
diff --git a/tests/fixtures/networks/bridge.yml b/tests/fixtures/networks/bridge.yml
new file mode 100644
index 00000000..9fa7db82
--- /dev/null
+++ b/tests/fixtures/networks/bridge.yml
@@ -0,0 +1,9 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - bridge
+ - default
diff --git a/tests/fixtures/networks/default-network-config.yml b/tests/fixtures/networks/default-network-config.yml
new file mode 100644
index 00000000..4bd0989b
--- /dev/null
+++ b/tests/fixtures/networks/default-network-config.yml
@@ -0,0 +1,13 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ another:
+ image: busybox:latest
+ command: top
+networks:
+ default:
+ driver: bridge
+ driver_opts:
+ "com.docker.network.bridge.enable_icc": "false"
diff --git a/tests/fixtures/networks/docker-compose.yml b/tests/fixtures/networks/docker-compose.yml
new file mode 100644
index 00000000..c11fa682
--- /dev/null
+++ b/tests/fixtures/networks/docker-compose.yml
@@ -0,0 +1,21 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks: ["front"]
+ app:
+ image: busybox
+ command: top
+ networks: ["front", "back"]
+ links:
+ - "db:database"
+ db:
+ image: busybox
+ command: top
+ networks: ["back"]
+
+networks:
+ front: {}
+ back: {}
diff --git a/tests/fixtures/networks/external-default.yml b/tests/fixtures/networks/external-default.yml
new file mode 100644
index 00000000..5c9426b8
--- /dev/null
+++ b/tests/fixtures/networks/external-default.yml
@@ -0,0 +1,12 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ another:
+ image: busybox:latest
+ command: top
+networks:
+ default:
+ external:
+ name: composetest_external_network
diff --git a/tests/fixtures/networks/external-networks.yml b/tests/fixtures/networks/external-networks.yml
new file mode 100644
index 00000000..db75b780
--- /dev/null
+++ b/tests/fixtures/networks/external-networks.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - networks_foo
+ - bar
+
+networks:
+ networks_foo:
+ external: true
+ bar:
+ external:
+ name: networks_bar
diff --git a/tests/fixtures/networks/missing-network.yml b/tests/fixtures/networks/missing-network.yml
new file mode 100644
index 00000000..41012535
--- /dev/null
+++ b/tests/fixtures/networks/missing-network.yml
@@ -0,0 +1,10 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks: ["foo"]
+
+networks:
+ bar: {}
diff --git a/tests/fixtures/networks/network-aliases.yml b/tests/fixtures/networks/network-aliases.yml
new file mode 100644
index 00000000..8cf7d5af
--- /dev/null
+++ b/tests/fixtures/networks/network-aliases.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ front:
+ aliases:
+ - forward_facing
+ - ahead
+ back:
+
+networks:
+ front: {}
+ back: {}
diff --git a/tests/fixtures/networks/network-internal.yml b/tests/fixtures/networks/network-internal.yml
new file mode 100755
index 00000000..1fa339b1
--- /dev/null
+++ b/tests/fixtures/networks/network-internal.yml
@@ -0,0 +1,13 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - internal
+
+networks:
+ internal:
+ driver: bridge
+ internal: True
diff --git a/tests/fixtures/networks/network-label.yml b/tests/fixtures/networks/network-label.yml
new file mode 100644
index 00000000..fdb24f65
--- /dev/null
+++ b/tests/fixtures/networks/network-label.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - network_with_label
+
+networks:
+ network_with_label:
+ labels:
+ - "label_key=label_val"
diff --git a/tests/fixtures/networks/network-mode.yml b/tests/fixtures/networks/network-mode.yml
new file mode 100644
index 00000000..e4d070b4
--- /dev/null
+++ b/tests/fixtures/networks/network-mode.yml
@@ -0,0 +1,27 @@
+version: "2"
+
+services:
+ bridge:
+ image: busybox
+ command: top
+ network_mode: bridge
+
+ service:
+ image: busybox
+ command: top
+ network_mode: "service:bridge"
+
+ container:
+ image: busybox
+ command: top
+ network_mode: "container:composetest_network_mode_container"
+
+ host:
+ image: busybox
+ command: top
+ network_mode: host
+
+ none:
+ image: busybox
+ command: top
+ network_mode: none
diff --git a/tests/fixtures/networks/network-static-addresses.yml b/tests/fixtures/networks/network-static-addresses.yml
new file mode 100755
index 00000000..f820ff6a
--- /dev/null
+++ b/tests/fixtures/networks/network-static-addresses.yml
@@ -0,0 +1,23 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ static_test:
+ ipv4_address: 172.16.100.100
+ ipv6_address: fe80::1001:100
+
+networks:
+ static_test:
+ driver: bridge
+ driver_opts:
+ com.docker.network.enable_ipv6: "true"
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.16.100.0/24
+ gateway: 172.16.100.1
+ - subnet: fe80::/64
+ gateway: fe80::1001:1
diff --git a/tests/fixtures/no-composefile/.gitignore b/tests/fixtures/no-composefile/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/no-composefile/.gitignore
diff --git a/tests/fixtures/no-links-composefile/docker-compose.yml b/tests/fixtures/no-links-composefile/docker-compose.yml
new file mode 100644
index 00000000..75a6a085
--- /dev/null
+++ b/tests/fixtures/no-links-composefile/docker-compose.yml
@@ -0,0 +1,9 @@
+db:
+ image: busybox:latest
+ command: top
+web:
+ image: busybox:latest
+ command: top
+console:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/no-services/docker-compose.yml b/tests/fixtures/no-services/docker-compose.yml
new file mode 100644
index 00000000..6e76ec0c
--- /dev/null
+++ b/tests/fixtures/no-services/docker-compose.yml
@@ -0,0 +1,5 @@
+version: "2"
+
+networks:
+ foo: {}
+ bar: {}
diff --git a/tests/fixtures/override-files/docker-compose.override.yml b/tests/fixtures/override-files/docker-compose.override.yml
new file mode 100644
index 00000000..b2c54060
--- /dev/null
+++ b/tests/fixtures/override-files/docker-compose.override.yml
@@ -0,0 +1,7 @@
+version: '2.2'
+services:
+ web:
+ command: "top"
+
+ db:
+ command: "top"
diff --git a/tests/fixtures/override-files/docker-compose.yml b/tests/fixtures/override-files/docker-compose.yml
new file mode 100644
index 00000000..6c3d4e17
--- /dev/null
+++ b/tests/fixtures/override-files/docker-compose.yml
@@ -0,0 +1,10 @@
+version: '2.2'
+services:
+ web:
+ image: busybox:latest
+ command: "sleep 200"
+ depends_on:
+ - db
+ db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/override-files/extra.yml b/tests/fixtures/override-files/extra.yml
new file mode 100644
index 00000000..492c3795
--- /dev/null
+++ b/tests/fixtures/override-files/extra.yml
@@ -0,0 +1,10 @@
+version: '2.2'
+services:
+ web:
+ depends_on:
+ - db
+ - other
+
+ other:
+ image: busybox:latest
+ command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.override.yaml b/tests/fixtures/override-yaml-files/docker-compose.override.yaml
new file mode 100644
index 00000000..58c67348
--- /dev/null
+++ b/tests/fixtures/override-yaml-files/docker-compose.override.yaml
@@ -0,0 +1,3 @@
+
+db:
+ command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.yml b/tests/fixtures/override-yaml-files/docker-compose.yml
new file mode 100644
index 00000000..5f2909d6
--- /dev/null
+++ b/tests/fixtures/override-yaml-files/docker-compose.yml
@@ -0,0 +1,10 @@
+
+web:
+ image: busybox:latest
+ command: "sleep 100"
+ links:
+ - db
+
+db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/pid-mode/docker-compose.yml b/tests/fixtures/pid-mode/docker-compose.yml
new file mode 100644
index 00000000..fece5a9f
--- /dev/null
+++ b/tests/fixtures/pid-mode/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "2.2"
+
+services:
+ service:
+ image: busybox
+ command: top
+ pid: "service:container"
+
+ container:
+ image: busybox
+ command: top
+ pid: "container:composetest_pid_mode_container"
+
+ host:
+ image: busybox
+ command: top
+ pid: host
diff --git a/tests/fixtures/ports-composefile-scale/docker-compose.yml b/tests/fixtures/ports-composefile-scale/docker-compose.yml
new file mode 100644
index 00000000..1a2bb485
--- /dev/null
+++ b/tests/fixtures/ports-composefile-scale/docker-compose.yml
@@ -0,0 +1,6 @@
+
+simple:
+ image: busybox:latest
+ command: /bin/sleep 300
+ ports:
+ - '3000'
diff --git a/tests/fixtures/ports-composefile/docker-compose.yml b/tests/fixtures/ports-composefile/docker-compose.yml
new file mode 100644
index 00000000..c213068d
--- /dev/null
+++ b/tests/fixtures/ports-composefile/docker-compose.yml
@@ -0,0 +1,8 @@
+
+simple:
+ image: busybox:latest
+ command: top
+ ports:
+ - '3000'
+ - '49152:3001'
+ - '49153-49154:3002-3003'
diff --git a/tests/fixtures/ports-composefile/expanded-notation.yml b/tests/fixtures/ports-composefile/expanded-notation.yml
new file mode 100644
index 00000000..09a7a2bf
--- /dev/null
+++ b/tests/fixtures/ports-composefile/expanded-notation.yml
@@ -0,0 +1,15 @@
+version: '3.2'
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ ports:
+ - target: 3000
+ - target: 3001
+ published: 53222
+ - target: 3002
+ published: 53223
+ protocol: tcp
+ - target: 3003
+ published: 53224
+ protocol: udp
diff --git a/tests/fixtures/restart/docker-compose.yml b/tests/fixtures/restart/docker-compose.yml
new file mode 100644
index 00000000..ecfdfbf5
--- /dev/null
+++ b/tests/fixtures/restart/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "2"
+services:
+ never:
+ image: busybox
+ restart: "no"
+ always:
+ image: busybox
+ restart: always
+ on-failure:
+ image: busybox
+ restart: on-failure
+ on-failure-5:
+ image: busybox
+ restart: "on-failure:5"
+ restart-null:
+ image: busybox
+ restart: ""
diff --git a/tests/fixtures/run-workdir/docker-compose.yml b/tests/fixtures/run-workdir/docker-compose.yml
new file mode 100644
index 00000000..dc3ea86a
--- /dev/null
+++ b/tests/fixtures/run-workdir/docker-compose.yml
@@ -0,0 +1,4 @@
+service:
+ image: busybox:latest
+ working_dir: /etc
+ command: /bin/true
diff --git a/tests/fixtures/scale/docker-compose.yml b/tests/fixtures/scale/docker-compose.yml
new file mode 100644
index 00000000..a0d3b771
--- /dev/null
+++ b/tests/fixtures/scale/docker-compose.yml
@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+ web:
+ image: busybox
+ command: top
+ scale: 2
+ db:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/secrets/default b/tests/fixtures/secrets/default
new file mode 100644
index 00000000..f9dc2014
--- /dev/null
+++ b/tests/fixtures/secrets/default
@@ -0,0 +1 @@
+This is the secret
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
new file mode 100644
index 00000000..fe717151
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+ simple:
+ image: busybox:latest
+ volumes:
+ - datastore:/data1
+
+volumes:
+ datastore:
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
new file mode 100644
index 00000000..98a7d23b
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ image: busybox:latest
diff --git a/tests/fixtures/simple-composefile-volume-ready/files/example.txt b/tests/fixtures/simple-composefile-volume-ready/files/example.txt
new file mode 100644
index 00000000..edb4d339
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/files/example.txt
@@ -0,0 +1 @@
+FILE_CONTENT
diff --git a/tests/fixtures/simple-composefile/digest.yml b/tests/fixtures/simple-composefile/digest.yml
new file mode 100644
index 00000000..08f1d993
--- /dev/null
+++ b/tests/fixtures/simple-composefile/digest.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+digest:
+ image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
+ command: top
diff --git a/tests/fixtures/simple-composefile/docker-compose.yml b/tests/fixtures/simple-composefile/docker-compose.yml
new file mode 100644
index 00000000..b25beaf4
--- /dev/null
+++ b/tests/fixtures/simple-composefile/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/simple-composefile/ignore-pull-failures.yml b/tests/fixtures/simple-composefile/ignore-pull-failures.yml
new file mode 100644
index 00000000..a28f7922
--- /dev/null
+++ b/tests/fixtures/simple-composefile/ignore-pull-failures.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: nonexisting-image:latest
+ command: top
diff --git a/tests/fixtures/simple-dockerfile/Dockerfile b/tests/fixtures/simple-dockerfile/Dockerfile
new file mode 100644
index 00000000..dd864b83
--- /dev/null
+++ b/tests/fixtures/simple-dockerfile/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+CMD echo "success"
diff --git a/tests/fixtures/simple-dockerfile/docker-compose.yml b/tests/fixtures/simple-dockerfile/docker-compose.yml
new file mode 100644
index 00000000..b0357541
--- /dev/null
+++ b/tests/fixtures/simple-dockerfile/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ build: .
diff --git a/tests/fixtures/simple-failing-dockerfile/Dockerfile b/tests/fixtures/simple-failing-dockerfile/Dockerfile
new file mode 100644
index 00000000..c2d06b16
--- /dev/null
+++ b/tests/fixtures/simple-failing-dockerfile/Dockerfile
@@ -0,0 +1,7 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+LABEL com.docker.compose.test_failing_image=true
+# With the following label the container will be cleaned up automatically
+# Must be kept in sync with LABEL_PROJECT from compose/const.py
+LABEL com.docker.compose.project=composetest
+RUN exit 1
diff --git a/tests/fixtures/simple-failing-dockerfile/docker-compose.yml b/tests/fixtures/simple-failing-dockerfile/docker-compose.yml
new file mode 100644
index 00000000..b0357541
--- /dev/null
+++ b/tests/fixtures/simple-failing-dockerfile/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ build: .
diff --git a/tests/fixtures/sleeps-composefile/docker-compose.yml b/tests/fixtures/sleeps-composefile/docker-compose.yml
new file mode 100644
index 00000000..7c8d84f8
--- /dev/null
+++ b/tests/fixtures/sleeps-composefile/docker-compose.yml
@@ -0,0 +1,10 @@
+
+version: "2"
+
+services:
+ simple:
+ image: busybox:latest
+ command: sleep 200
+ another:
+ image: busybox:latest
+ command: sleep 200
diff --git a/tests/fixtures/stop-signal-composefile/docker-compose.yml b/tests/fixtures/stop-signal-composefile/docker-compose.yml
new file mode 100644
index 00000000..04f58aa9
--- /dev/null
+++ b/tests/fixtures/stop-signal-composefile/docker-compose.yml
@@ -0,0 +1,10 @@
+simple:
+ image: busybox:latest
+ command:
+ - sh
+ - '-c'
+ - |
+ trap 'exit 0' SIGINT
+ trap 'exit 1' SIGTERM
+ while true; do :; done
+ stop_signal: SIGINT
diff --git a/tests/fixtures/tls/ca.pem b/tests/fixtures/tls/ca.pem
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/tls/ca.pem
diff --git a/tests/fixtures/tls/cert.pem b/tests/fixtures/tls/cert.pem
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/tls/cert.pem
diff --git a/tests/fixtures/tls/key.key b/tests/fixtures/tls/key.key
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/fixtures/tls/key.key
diff --git a/tests/fixtures/top/docker-compose.yml b/tests/fixtures/top/docker-compose.yml
new file mode 100644
index 00000000..d632a836
--- /dev/null
+++ b/tests/fixtures/top/docker-compose.yml
@@ -0,0 +1,6 @@
+service_a:
+ image: busybox:latest
+ command: top
+service_b:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/unicode-environment/docker-compose.yml b/tests/fixtures/unicode-environment/docker-compose.yml
new file mode 100644
index 00000000..a41af4f0
--- /dev/null
+++ b/tests/fixtures/unicode-environment/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '2'
+services:
+ simple:
+ image: busybox:latest
+ command: sh -c 'echo $$FOO'
+ environment:
+ FOO: ${BAR}
diff --git a/tests/fixtures/user-composefile/docker-compose.yml b/tests/fixtures/user-composefile/docker-compose.yml
new file mode 100644
index 00000000..3eb7d397
--- /dev/null
+++ b/tests/fixtures/user-composefile/docker-compose.yml
@@ -0,0 +1,4 @@
+service:
+ image: busybox:latest
+ user: notauser
+ command: id
diff --git a/tests/fixtures/v1-config/docker-compose.yml b/tests/fixtures/v1-config/docker-compose.yml
new file mode 100644
index 00000000..8646c4ed
--- /dev/null
+++ b/tests/fixtures/v1-config/docker-compose.yml
@@ -0,0 +1,10 @@
+net:
+ image: busybox
+volume:
+ image: busybox
+ volumes:
+ - /data
+app:
+ image: busybox
+ net: "container:net"
+ volumes_from: ["volume"]
diff --git a/tests/fixtures/v2-dependencies/docker-compose.yml b/tests/fixtures/v2-dependencies/docker-compose.yml
new file mode 100644
index 00000000..2e14b94b
--- /dev/null
+++ b/tests/fixtures/v2-dependencies/docker-compose.yml
@@ -0,0 +1,13 @@
+version: "2.0"
+services:
+ db:
+ image: busybox:latest
+ command: top
+ web:
+ image: busybox:latest
+ command: top
+ depends_on:
+ - db
+ console:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/v2-full/Dockerfile b/tests/fixtures/v2-full/Dockerfile
new file mode 100644
index 00000000..51ed0d90
--- /dev/null
+++ b/tests/fixtures/v2-full/Dockerfile
@@ -0,0 +1,4 @@
+
+FROM busybox:latest
+RUN echo something
+CMD top
diff --git a/tests/fixtures/v2-full/docker-compose.yml b/tests/fixtures/v2-full/docker-compose.yml
new file mode 100644
index 00000000..a973dd0c
--- /dev/null
+++ b/tests/fixtures/v2-full/docker-compose.yml
@@ -0,0 +1,24 @@
+
+version: "2"
+
+volumes:
+ data:
+ driver: local
+
+networks:
+ front: {}
+
+services:
+ web:
+ build: .
+ networks:
+ - front
+ - default
+ volumes_from:
+ - other
+
+ other:
+ image: busybox:latest
+ command: top
+ volumes:
+ - /data
diff --git a/tests/fixtures/v2-simple/docker-compose.yml b/tests/fixtures/v2-simple/docker-compose.yml
new file mode 100644
index 00000000..c99ae02f
--- /dev/null
+++ b/tests/fixtures/v2-simple/docker-compose.yml
@@ -0,0 +1,8 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/v2-simple/links-invalid.yml b/tests/fixtures/v2-simple/links-invalid.yml
new file mode 100644
index 00000000..481aa404
--- /dev/null
+++ b/tests/fixtures/v2-simple/links-invalid.yml
@@ -0,0 +1,10 @@
+version: "2"
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ links:
+ - another
+ another:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/v3-full/docker-compose.yml b/tests/fixtures/v3-full/docker-compose.yml
new file mode 100644
index 00000000..2bc0e248
--- /dev/null
+++ b/tests/fixtures/v3-full/docker-compose.yml
@@ -0,0 +1,57 @@
+version: "3.2"
+services:
+ web:
+ image: busybox
+
+ deploy:
+ mode: replicated
+ replicas: 6
+ labels: [FOO=BAR]
+ update_config:
+ parallelism: 3
+ delay: 10s
+ failure_action: continue
+ monitor: 60s
+ max_failure_ratio: 0.3
+ resources:
+ limits:
+ cpus: '0.001'
+ memory: 50M
+ reservations:
+ cpus: '0.0001'
+ memory: 20M
+ restart_policy:
+ condition: on_failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+ placement:
+ constraints: [node=foo]
+
+ healthcheck:
+ test: cat /etc/passwd
+ interval: 10s
+ timeout: 1s
+ retries: 5
+
+ volumes:
+ - source: /host/path
+ target: /container/path
+ type: bind
+ read_only: true
+ - source: foobar
+ type: volume
+ target: /container/volumepath
+ - type: volume
+ target: /anonymous
+ - type: volume
+ source: foobar
+ target: /container/volumepath2
+ volume:
+ nocopy: true
+
+ stop_grace_period: 20s
+volumes:
+ foobar:
+ labels:
+ com.docker.compose.test: 'true'
diff --git a/tests/fixtures/volume-path-interpolation/docker-compose.yml b/tests/fixtures/volume-path-interpolation/docker-compose.yml
new file mode 100644
index 00000000..6d4e236a
--- /dev/null
+++ b/tests/fixtures/volume-path-interpolation/docker-compose.yml
@@ -0,0 +1,5 @@
+test:
+ image: busybox
+ command: top
+ volumes:
+ - "~/${VOLUME_NAME}:/container-path"
diff --git a/tests/fixtures/volume-path/common/services.yml b/tests/fixtures/volume-path/common/services.yml
new file mode 100644
index 00000000..2dbf7596
--- /dev/null
+++ b/tests/fixtures/volume-path/common/services.yml
@@ -0,0 +1,5 @@
+db:
+ image: busybox
+ volumes:
+ - ./foo:/foo
+ - ./bar:/bar
diff --git a/tests/fixtures/volume-path/docker-compose.yml b/tests/fixtures/volume-path/docker-compose.yml
new file mode 100644
index 00000000..af433c52
--- /dev/null
+++ b/tests/fixtures/volume-path/docker-compose.yml
@@ -0,0 +1,6 @@
+db:
+ extends:
+ file: common/services.yml
+ service: db
+ volumes:
+ - ./bar:/bar
diff --git a/tests/fixtures/volume/docker-compose.yml b/tests/fixtures/volume/docker-compose.yml
new file mode 100644
index 00000000..4335b0a0
--- /dev/null
+++ b/tests/fixtures/volume/docker-compose.yml
@@ -0,0 +1,11 @@
+version: '2'
+services:
+ test:
+ image: busybox
+ command: top
+ volumes:
+ - /container-path
+ - testvolume:/container-named-path
+
+volumes:
+ testvolume: {}
diff --git a/tests/fixtures/volumes-from-container/docker-compose.yml b/tests/fixtures/volumes-from-container/docker-compose.yml
new file mode 100644
index 00000000..495fcaae
--- /dev/null
+++ b/tests/fixtures/volumes-from-container/docker-compose.yml
@@ -0,0 +1,5 @@
+version: "2"
+services:
+ test:
+ image: busybox
+ volumes_from: ["container:composetest_data_container"]
diff --git a/tests/fixtures/volumes/docker-compose.yml b/tests/fixtures/volumes/docker-compose.yml
new file mode 100644
index 00000000..da711ac4
--- /dev/null
+++ b/tests/fixtures/volumes/docker-compose.yml
@@ -0,0 +1,2 @@
+version: '2.1'
+services: {}
diff --git a/tests/fixtures/volumes/external-volumes-v2-x.yml b/tests/fixtures/volumes/external-volumes-v2-x.yml
new file mode 100644
index 00000000..3b736c5f
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v2-x.yml
@@ -0,0 +1,17 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v2.yml b/tests/fixtures/volumes/external-volumes-v2.yml
new file mode 100644
index 00000000..4025b53b
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v2.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v3-4.yml b/tests/fixtures/volumes/external-volumes-v3-4.yml
new file mode 100644
index 00000000..76c8421d
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v3-4.yml
@@ -0,0 +1,17 @@
+version: "3.4"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v3-x.yml b/tests/fixtures/volumes/external-volumes-v3-x.yml
new file mode 100644
index 00000000..903fee64
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v3-x.yml
@@ -0,0 +1,16 @@
+version: "3.0"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/volume-label.yml b/tests/fixtures/volumes/volume-label.yml
new file mode 100644
index 00000000..a5f33a5a
--- /dev/null
+++ b/tests/fixtures/volumes/volume-label.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - volume_with_label:/data
+
+volumes:
+ volume_with_label:
+ labels:
+ - "label_key=label_val"
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 00000000..a93de993
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,50 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+
+from compose.config.config import ConfigDetails
+from compose.config.config import ConfigFile
+from compose.config.config import load
+
+
+def build_config(contents, **kwargs):
+ return load(build_config_details(contents, **kwargs))
+
+
+def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
+ return ConfigDetails(
+ working_dir,
+ [ConfigFile(filename, contents)],
+ )
+
+
+def create_host_file(client, filename):
+ dirname = os.path.dirname(filename)
+
+ with open(filename, 'r') as fh:
+ content = fh.read()
+
+ container = client.create_container(
+ 'busybox:latest',
+ ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
+ volumes={dirname: {}},
+ host_config=client.create_host_config(
+ binds={dirname: {'bind': dirname, 'ro': False}},
+ network_mode='none',
+ ),
+ )
+ try:
+ client.start(container)
+ exitcode = client.wait(container)
+
+ if exitcode != 0:
+ output = client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}".format(exitcode, output))
+
+ container_info = client.inspect_container(container)
+ if 'Node' in container_info:
+ return container_info['Node']['Name']
+ finally:
+ client.remove_container(container, force=True)
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/__init__.py
diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py
new file mode 100644
index 00000000..2ff610fb
--- /dev/null
+++ b/tests/integration/network_test.py
@@ -0,0 +1,17 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .testcases import DockerClientTestCase
+from compose.const import LABEL_NETWORK
+from compose.const import LABEL_PROJECT
+from compose.network import Network
+
+
+class NetworkTest(DockerClientTestCase):
+ def test_network_default_labels(self):
+ net = Network(self.client, 'composetest', 'foonet')
+ net.ensure()
+ net_data = net.inspect()
+ labels = net_data['Labels']
+ assert labels[LABEL_NETWORK] == net.name
+ assert labels[LABEL_PROJECT] == net.project
diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py
new file mode 100644
index 00000000..953dd52b
--- /dev/null
+++ b/tests/integration/project_test.py
@@ -0,0 +1,1636 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os.path
+import random
+
+import py
+import pytest
+from docker.errors import APIError
+from docker.errors import NotFound
+
+from .. import mock
+from ..helpers import build_config as load_config
+from ..helpers import create_host_file
+from .testcases import DockerClientTestCase
+from .testcases import SWARM_SKIP_CONTAINERS_ALL
+from compose.config import config
+from compose.config import ConfigurationError
+from compose.config import types
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_SERVICE
+from compose.container import Container
+from compose.errors import HealthCheckFailed
+from compose.errors import NoHealthCheckConfigured
+from compose.project import Project
+from compose.project import ProjectError
+from compose.service import ConvergenceStrategy
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+
+def build_config(**kwargs):
+ return config.Config(
+ version=kwargs.get('version'),
+ services=kwargs.get('services'),
+ volumes=kwargs.get('volumes'),
+ networks=kwargs.get('networks'),
+ secrets=kwargs.get('secrets'),
+ configs=kwargs.get('configs'),
+ )
+
+
+class ProjectTest(DockerClientTestCase):
+
+ def test_containers(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+
+ containers = project.containers()
+ self.assertEqual(len(containers), 2)
+
+ @pytest.mark.skipif(SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug')
+ def test_containers_stopped(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+ assert len(project.containers()) == 2
+ assert len(project.containers(stopped=True)) == 2
+
+ project.stop()
+ assert len(project.containers()) == 0
+ assert len(project.containers(stopped=True)) == 2
+
+ def test_containers_with_service_names(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+
+ containers = project.containers(['web'])
+ self.assertEqual(
+ [c.name for c in containers],
+ ['composetest_web_1'])
+
+ def test_containers_with_extra_service(self):
+ web = self.create_service('web')
+ web_1 = web.create_container()
+
+ db = self.create_service('db')
+ db_1 = db.create_container()
+
+ self.create_service('extra').create_container()
+
+ project = Project('composetest', [web, db], self.client)
+ self.assertEqual(
+ set(project.containers(stopped=True)),
+ set([web_1, db_1]),
+ )
+
+ def test_volumes_from_service(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'data': {
+ 'image': 'busybox:latest',
+ 'volumes': ['/var/data'],
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'volumes_from': ['data'],
+ },
+ }),
+ client=self.client,
+ )
+ db = project.get_service('db')
+ data = project.get_service('data')
+ self.assertEqual(db.volumes_from, [VolumeFromSpec(data, 'rw', 'service')])
+
+ def test_volumes_from_container(self):
+ data_container = Container.create(
+ self.client,
+ image='busybox:latest',
+ volumes=['/var/data'],
+ name='composetest_data_container',
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'db': {
+ 'image': 'busybox:latest',
+ 'volumes_from': ['composetest_data_container'],
+ },
+ }),
+ client=self.client,
+ )
+ db = project.get_service('db')
+ self.assertEqual(db._get_volumes_from(), [data_container.id + ':rw'])
+
+ @v2_only()
+ @no_cluster('container networks not supported in Swarm')
+ def test_network_mode_from_service(self):
+ project = Project.from_config(
+ name='composetest',
+ client=self.client,
+ config_data=load_config({
+ 'version': str(V2_0),
+ 'services': {
+ 'net': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'network_mode': 'service:net',
+ 'command': ["top"]
+ },
+ },
+ }),
+ )
+
+ project.up()
+
+ web = project.get_service('web')
+ net = project.get_service('net')
+ self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+
+ @v2_only()
+ @no_cluster('container networks not supported in Swarm')
+ def test_network_mode_from_container(self):
+ def get_project():
+ return Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'version': str(V2_0),
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'network_mode': 'container:composetest_net_container'
+ },
+ },
+ }),
+ client=self.client,
+ )
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ get_project()
+
+ assert "container 'composetest_net_container' which does not exist" in excinfo.exconly()
+
+ net_container = Container.create(
+ self.client,
+ image='busybox:latest',
+ name='composetest_net_container',
+ command='top',
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ )
+ net_container.start()
+
+ project = get_project()
+ project.up()
+
+ web = project.get_service('web')
+ self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+
+ @no_cluster('container networks not supported in Swarm')
+ def test_net_from_service_v1(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'net': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'net': 'container:net',
+ 'command': ["top"]
+ },
+ }),
+ client=self.client,
+ )
+
+ project.up()
+
+ web = project.get_service('web')
+ net = project.get_service('net')
+ self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+
+ @no_cluster('container networks not supported in Swarm')
+ def test_net_from_container_v1(self):
+ def get_project():
+ return Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'web': {
+ 'image': 'busybox:latest',
+ 'net': 'container:composetest_net_container'
+ },
+ }),
+ client=self.client,
+ )
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ get_project()
+
+ assert "container 'composetest_net_container' which does not exist" in excinfo.exconly()
+
+ net_container = Container.create(
+ self.client,
+ image='busybox:latest',
+ name='composetest_net_container',
+ command='top',
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ )
+ net_container.start()
+
+ project = get_project()
+ project.up()
+
+ web = project.get_service('web')
+ self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+
+ def test_start_pause_unpause_stop_kill_remove(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.start()
+
+ self.assertEqual(len(web.containers()), 0)
+ self.assertEqual(len(db.containers()), 0)
+
+ web_container_1 = web.create_container()
+ web_container_2 = web.create_container()
+ db_container = db.create_container()
+
+ project.start(service_names=['web'])
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running),
+ set([web_container_1.name, web_container_2.name]))
+
+ project.start()
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running),
+ set([web_container_1.name, web_container_2.name, db_container.name]))
+
+ project.pause(service_names=['web'])
+ self.assertEqual(
+ set([c.name for c in project.containers() if c.is_paused]),
+ set([web_container_1.name, web_container_2.name]))
+
+ project.pause()
+ self.assertEqual(
+ set([c.name for c in project.containers() if c.is_paused]),
+ set([web_container_1.name, web_container_2.name, db_container.name]))
+
+ project.unpause(service_names=['db'])
+ self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 2)
+
+ project.unpause()
+ self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 0)
+
+ project.stop(service_names=['web'], timeout=1)
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running), set([db_container.name])
+ )
+
+ project.kill(service_names=['db'])
+ self.assertEqual(len([c for c in project.containers() if c.is_running]), 0)
+ self.assertEqual(len(project.containers(stopped=True)), 3)
+
+ project.remove_stopped(service_names=['web'])
+ self.assertEqual(len(project.containers(stopped=True)), 1)
+
+ project.remove_stopped()
+ self.assertEqual(len(project.containers(stopped=True)), 0)
+
+ def test_create(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+
+ project.create(['db'])
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ assert not containers[0].is_running
+ db_containers = db.containers(stopped=True)
+ assert len(db_containers) == 1
+ assert not db_containers[0].is_running
+ assert len(web.containers(stopped=True)) == 0
+
+ def test_create_twice(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+
+ project.create(['db', 'web'])
+ project.create(['db', 'web'])
+ containers = project.containers(stopped=True)
+ assert len(containers) == 2
+ db_containers = db.containers(stopped=True)
+ assert len(db_containers) == 1
+ assert not db_containers[0].is_running
+ web_containers = web.containers(stopped=True)
+ assert len(web_containers) == 1
+ assert not web_containers[0].is_running
+
+ def test_create_with_links(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, 'db')])
+ project = Project('composetest', [db, web], self.client)
+
+ project.create(['web'])
+ assert len(project.containers(stopped=True)) == 2
+ assert not [c for c in project.containers(stopped=True) if c.is_running]
+ assert len(db.containers(stopped=True)) == 1
+ assert len(web.containers(stopped=True)) == 1
+
+ def test_create_strategy_always(self):
+ db = self.create_service('db')
+ project = Project('composetest', [db], self.client)
+ project.create(['db'])
+ old_id = project.containers(stopped=True)[0].id
+
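+        # ConvergenceStrategy.always recreates the container even though its
+        # config is unchanged, so the id changes while the count stays at one.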
+ project.create(['db'], strategy=ConvergenceStrategy.always)
+ assert len(project.containers(stopped=True)) == 1
+
+ db_container = project.containers(stopped=True)[0]
+ assert not db_container.is_running
+ assert db_container.id != old_id
+
+ def test_create_strategy_never(self):
+ db = self.create_service('db')
+ project = Project('composetest', [db], self.client)
+ project.create(['db'])
+ old_id = project.containers(stopped=True)[0].id
+
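+        # ConvergenceStrategy.never reuses the existing container, so the id
+        # is unchanged.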
+ project.create(['db'], strategy=ConvergenceStrategy.never)
+ assert len(project.containers(stopped=True)) == 1
+
+ db_container = project.containers(stopped=True)[0]
+ assert not db_container.is_running
+ assert db_container.id == old_id
+
+ def test_project_up(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(web.containers()), 0)
+
+ def test_project_up_starts_uncreated_services(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, 'db')])
+ project = Project('composetest', [db, web], self.client)
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+
+ project.up()
+ self.assertEqual(len(project.containers()), 2)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(web.containers()), 1)
+
+ def test_recreate_preserves_volumes(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/etc')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+ old_db_id = project.containers()[0].id
+ db_volume_path = project.containers()[0].get('Volumes./etc')
+
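+        # Recreating with ConvergenceStrategy.always must carry the anonymous
+        # /etc volume over to the new db container.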
+ project.up(strategy=ConvergenceStrategy.always)
+ self.assertEqual(len(project.containers()), 2)
+
+ db_container = [c for c in project.containers() if 'db' in c.name][0]
+ self.assertNotEqual(db_container.id, old_db_id)
+ self.assertEqual(db_container.get('Volumes./etc'), db_volume_path)
+
+ def test_project_up_with_no_recreate_running(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ self.assertEqual(len(project.containers()), 1)
+ old_db_id = project.containers()[0].id
+ container, = project.containers()
+ db_volume_path = container.get_mount('/var/db')['Source']
+
+ project.up(strategy=ConvergenceStrategy.never)
+ self.assertEqual(len(project.containers()), 2)
+
+ db_container = [c for c in project.containers() if 'db' in c.name][0]
+ self.assertEqual(db_container.id, old_db_id)
+ self.assertEqual(
+ db_container.get_mount('/var/db')['Source'],
+ db_volume_path)
+
+ def test_project_up_with_no_recreate_stopped(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'])
+ project.kill()
+
+ old_containers = project.containers(stopped=True)
+
+ self.assertEqual(len(old_containers), 1)
+ old_container, = old_containers
+ old_db_id = old_container.id
+ db_volume_path = old_container.get_mount('/var/db')['Source']
+
+ project.up(strategy=ConvergenceStrategy.never)
+
+ new_containers = project.containers(stopped=True)
+ self.assertEqual(len(new_containers), 2)
+ self.assertEqual([c.is_running for c in new_containers], [True, True])
+
+ db_container = [c for c in new_containers if 'db' in c.name][0]
+ self.assertEqual(db_container.id, old_db_id)
+ self.assertEqual(
+ db_container.get_mount('/var/db')['Source'],
+ db_volume_path)
+
+ def test_project_up_without_all_services(self):
+ console = self.create_service('console')
+ db = self.create_service('db')
+ project = Project('composetest', [console, db], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up()
+ self.assertEqual(len(project.containers()), 2)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 1)
+
+ def test_project_up_starts_links(self):
+ console = self.create_service('console')
+ db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ web = self.create_service('web', links=[(db, 'db')])
+
+ project = Project('composetest', [web, db, console], self.client)
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['web'])
+ self.assertEqual(len(project.containers()), 2)
+ self.assertEqual(len(web.containers()), 1)
+ self.assertEqual(len(db.containers()), 1)
+ self.assertEqual(len(console.containers()), 0)
+
+ def test_project_up_starts_depends(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'console': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ },
+ 'data': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'volumes_from': ['data'],
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'links': ['db'],
+ },
+ }),
+ client=self.client,
+ )
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['web'])
+ self.assertEqual(len(project.containers()), 3)
+ self.assertEqual(len(project.get_service('web').containers()), 1)
+ self.assertEqual(len(project.get_service('db').containers()), 1)
+ self.assertEqual(len(project.get_service('data').containers()), 1)
+ self.assertEqual(len(project.get_service('console').containers()), 0)
+
+ def test_project_up_with_no_deps(self):
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'console': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ },
+ 'data': {
+ 'image': 'busybox:latest',
+ 'command': ["top"]
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'volumes_from': ['data'],
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'command': ["top"],
+ 'links': ['db'],
+ },
+ }),
+ client=self.client,
+ )
+ project.start()
+ self.assertEqual(len(project.containers()), 0)
+
+ project.up(['db'], start_deps=False)
+ self.assertEqual(len(project.containers(stopped=True)), 2)
+ self.assertEqual(len(project.get_service('web').containers()), 0)
+ self.assertEqual(len(project.get_service('db').containers()), 1)
+ self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
+ assert not project.get_service('data').containers(stopped=True)[0].is_running
+ self.assertEqual(len(project.get_service('console').containers()), 0)
+
+ def test_project_up_recreate_with_tmpfs_volume(self):
+ # https://github.com/docker/compose/issues/4751
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'version': '2.1',
+ 'services': {
+ 'foo': {
+ 'image': 'busybox:latest',
+ 'tmpfs': ['/dev/shm'],
+ 'volumes': ['/dev/shm']
+ }
+ }
+ }), client=self.client
+ )
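+        # Regression check for the issue above: recreating a container whose
+        # declared volume path is also a tmpfs mount should succeed.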
+ project.up()
+ project.up(strategy=ConvergenceStrategy.always)
+
+ def test_unscale_after_restart(self):
+ web = self.create_service('web')
+ project = Project('composetest', [web], self.client)
+
+ project.start()
+
+ service = project.get_service('web')
+ service.scale(1)
+ self.assertEqual(len(service.containers()), 1)
+ service.scale(3)
+ self.assertEqual(len(service.containers()), 3)
+ project.up()
+ service = project.get_service('web')
+ self.assertEqual(len(service.containers()), 1)
+ service.scale(1)
+ self.assertEqual(len(service.containers()), 1)
+ project.up(scale_override={'web': 3})
+ service = project.get_service('web')
+ self.assertEqual(len(service.containers()), 3)
+        # Does scale=0 even make sense? After up() recreates the service,
+        # at least one container will be running.
+ service.scale(0)
+ project.up()
+ service = project.get_service('web')
+ self.assertEqual(len(service.containers()), 1)
+
+ @v2_only()
+ def test_project_up_networks(self):
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'networks': {
+ 'foo': None,
+ 'bar': None,
+ 'baz': {'aliases': ['extra']},
+ },
+ }],
+ networks={
+ 'foo': {'driver': 'bridge'},
+ 'bar': {'driver': None},
+ 'baz': {},
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+
+ containers = project.containers()
+ assert len(containers) == 1
+ container, = containers
+
+ for net_name in ['foo', 'bar', 'baz']:
+ full_net_name = 'composetest_{}'.format(net_name)
+ network_data = self.client.inspect_network(full_net_name)
+ assert network_data['Name'] == full_net_name
+
+ aliases_key = 'NetworkSettings.Networks.{net}.Aliases'
+ assert 'web' in container.get(aliases_key.format(net='composetest_foo'))
+ assert 'web' in container.get(aliases_key.format(net='composetest_baz'))
+ assert 'extra' in container.get(aliases_key.format(net='composetest_baz'))
+
+ foo_data = self.client.inspect_network('composetest_foo')
+ assert foo_data['Driver'] == 'bridge'
+
+ @v2_only()
+ def test_up_with_ipam_config(self):
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {'front': None},
+ }],
+ networks={
+ 'front': {
+ 'driver': 'bridge',
+ 'driver_opts': {
+ "com.docker.network.bridge.enable_icc": "false",
+ },
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [{
+ "subnet": "172.28.0.0/16",
+ "ip_range": "172.28.5.0/24",
+ "gateway": "172.28.5.254",
+ "aux_addresses": {
+ "a": "172.28.1.5",
+ "b": "172.28.1.6",
+ "c": "172.28.1.7",
+ },
+ }],
+ },
+ },
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+
+ network = self.client.networks(names=['composetest_front'])[0]
+
+ assert network['Options'] == {
+ "com.docker.network.bridge.enable_icc": "false"
+ }
+
+ assert network['IPAM'] == {
+ 'Driver': 'default',
+ 'Options': None,
+ 'Config': [{
+ 'Subnet': "172.28.0.0/16",
+ 'IPRange': "172.28.5.0/24",
+ 'Gateway': "172.28.5.254",
+ 'AuxiliaryAddresses': {
+ 'a': '172.28.1.5',
+ 'b': '172.28.1.6',
+ 'c': '172.28.1.7',
+ },
+ }],
+ }
+
+ @v2_only()
+ def test_up_with_ipam_options(self):
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {'front': None},
+ }],
+ networks={
+ 'front': {
+ 'driver': 'bridge',
+ 'ipam': {
+ 'driver': 'default',
+ 'options': {
+ "com.docker.compose.network.test": "9-29-045"
+ }
+ },
+ },
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+
+ network = self.client.networks(names=['composetest_front'])[0]
+
+ assert network['IPAM']['Options'] == {
+ "com.docker.compose.network.test": "9-29-045"
+ }
+
+ @v2_1_only()
+ def test_up_with_network_static_addresses(self):
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'networks': {
+ 'static_test': {
+ 'ipv4_address': '172.16.100.100',
+ 'ipv6_address': 'fe80::1001:102'
+ }
+ },
+ }],
+ networks={
+ 'static_test': {
+ 'driver': 'bridge',
+ 'driver_opts': {
+ "com.docker.network.enable_ipv6": "true",
+ },
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {"subnet": "172.16.100.0/24",
+ "gateway": "172.16.100.1"},
+ {"subnet": "fe80::/64",
+ "gateway": "fe80::1001:1"}
+ ]
+ },
+ 'enable_ipv6': True,
+ }
+ }
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up(detached=True)
+
+ service_container = project.get_service('web').containers()[0]
+
+        ipam_config = (service_container.inspect().get('NetworkSettings', {}).
+                       get('Networks', {}).get('composetest_static_test', {}).
+                       get('IPAMConfig', {}))
+        assert ipam_config.get('IPv4Address') == '172.16.100.100'
+        assert ipam_config.get('IPv6Address') == 'fe80::1001:102'
+
+ @v2_1_only()
+ def test_up_with_enable_ipv6(self):
+ self.require_api_version('1.23')
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'networks': {
+ 'static_test': {
+ 'ipv6_address': 'fe80::1001:102'
+ }
+ },
+ }],
+ networks={
+ 'static_test': {
+ 'driver': 'bridge',
+ 'enable_ipv6': True,
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {"subnet": "fe80::/64",
+ "gateway": "fe80::1001:1"}
+ ]
+ }
+ }
+ }
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up(detached=True)
+ network = [n for n in self.client.networks() if 'static_test' in n['Name']][0]
+ service_container = project.get_service('web').containers()[0]
+
+ assert network['EnableIPv6'] is True
+ ipam_config = (service_container.inspect().get('NetworkSettings', {}).
+ get('Networks', {}).get('composetest_static_test', {}).
+ get('IPAMConfig', {}))
+ assert ipam_config.get('IPv6Address') == 'fe80::1001:102'
+
+ @v2_only()
+ def test_up_with_network_static_addresses_missing_subnet(self):
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {
+ 'static_test': {
+ 'ipv4_address': '172.16.100.100',
+ 'ipv6_address': 'fe80::1001:101'
+ }
+ },
+ }],
+ networks={
+ 'static_test': {
+ 'driver': 'bridge',
+ 'driver_opts': {
+ "com.docker.network.enable_ipv6": "true",
+ },
+ 'ipam': {
+ 'driver': 'default',
+ },
+ },
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+
+ with self.assertRaises(ProjectError):
+ project.up()
+
+ @v2_1_only()
+ def test_up_with_network_link_local_ips(self):
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {
+ 'linklocaltest': {
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+ }],
+ networks={
+ 'linklocaltest': {'driver': 'bridge'}
+ }
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ project.up(detached=True)
+
+ service_container = project.get_service('web').containers(stopped=True)[0]
+ ipam_config = service_container.inspect().get(
+ 'NetworkSettings', {}
+ ).get(
+ 'Networks', {}
+ ).get(
+ 'composetest_linklocaltest', {}
+ ).get('IPAMConfig', {})
+ assert 'LinkLocalIPs' in ipam_config
+ assert ipam_config['LinkLocalIPs'] == ['169.254.8.8']
+
+ @v2_1_only()
+ def test_up_with_isolation(self):
+ self.require_api_version('1.24')
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'isolation': 'default'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers(stopped=True)[0]
+ assert service_container.inspect()['HostConfig']['Isolation'] == 'default'
+
+ @v2_1_only()
+ def test_up_with_invalid_isolation(self):
+ self.require_api_version('1.24')
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'isolation': 'foobar'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ with self.assertRaises(ProjectError):
+ project.up()
+
+ @v2_only()
+ def test_project_up_with_network_internal(self):
+ self.require_api_version('1.23')
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {'internal': None},
+ }],
+ networks={
+ 'internal': {'driver': 'bridge', 'internal': True},
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+
+ network = self.client.networks(names=['composetest_internal'])[0]
+
+ assert network['Internal'] is True
+
+ @v2_1_only()
+ def test_project_up_with_network_label(self):
+ self.require_api_version('1.23')
+
+ network_name = 'network_with_label'
+
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {network_name: None}
+ }],
+ networks={
+ network_name: {'labels': {'label_key': 'label_val'}}
+ }
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+
+ project.up()
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].startswith('composetest_')
+ ]
+
+ assert [n['Name'] for n in networks] == ['composetest_{}'.format(network_name)]
+ assert 'label_key' in networks[0]['Labels']
+ assert networks[0]['Labels']['label_key'] == 'label_val'
+
+ @v2_only()
+ def test_project_up_volumes(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={vol_name: {'driver': 'local'}},
+ )
+
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.up()
+ self.assertEqual(len(project.containers()), 1)
+
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ self.assertEqual(volume_data['Driver'], 'local')
+
+ @v2_1_only()
+ def test_project_up_with_volume_labels(self):
+ self.require_api_version('1.23')
+
+ volume_name = 'volume_with_label'
+
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'volumes': [VolumeSpec.parse('{}:/data'.format(volume_name))]
+ }],
+ volumes={
+ volume_name: {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+
+ project.up()
+
+ volumes = [
+ v for v in self.client.volumes().get('Volumes', [])
+ if v['Name'].split('/')[-1].startswith('composetest_')
+ ]
+
+ assert set([v['Name'].split('/')[-1] for v in volumes]) == set(
+ ['composetest_{}'.format(volume_name)]
+ )
+
+ assert 'label_key' in volumes[0]['Labels']
+ assert volumes[0]['Labels']['label_key'] == 'label_val'
+
+ @v2_only()
+ def test_project_up_logging_with_multiple_files(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {
+ 'version': str(V2_0),
+ 'services': {
+ 'simple': {'image': 'busybox:latest', 'command': 'top'},
+ 'another': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'logging': {
+ 'driver': "json-file",
+ 'options': {
+ 'max-size': "10m"
+ }
+ }
+ }
+ }
+
+ })
+ override_file = config.ConfigFile(
+ 'override.yml',
+ {
+ 'version': str(V2_0),
+ 'services': {
+ 'another': {
+ 'logging': {
+ 'driver': "none"
+ }
+ }
+ }
+
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ tmpdir = py.test.ensuretemp('logging_test')
+ self.addCleanup(tmpdir.remove)
+ with tmpdir.as_cwd():
+ config_data = config.load(details)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ containers = project.containers()
+ self.assertEqual(len(containers), 2)
+
+ another = project.get_service('another').containers()[0]
+ log_config = another.get('HostConfig.LogConfig')
+ self.assertTrue(log_config)
+ self.assertEqual(log_config.get('Type'), 'none')
+
+ @v2_only()
+ def test_project_up_port_mappings_with_multiple_files(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {
+ 'version': str(V2_0),
+ 'services': {
+ 'simple': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': ['1234:1234']
+ },
+ },
+
+ })
+ override_file = config.ConfigFile(
+ 'override.yml',
+ {
+ 'version': str(V2_0),
+ 'services': {
+ 'simple': {
+ 'ports': ['1234:1234']
+ }
+ }
+
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ config_data = config.load(details)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ containers = project.containers()
+ self.assertEqual(len(containers), 1)
+
+ @v2_2_only()
+ def test_project_up_config_scale(self):
+ config_data = build_config(
+ version=V2_2,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'scale': 3
+ }]
+ )
+
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ assert len(project.containers()) == 3
+
+ project.up(scale_override={'web': 2})
+ assert len(project.containers()) == 2
+
+ project.up(scale_override={'web': 4})
+ assert len(project.containers()) == 4
+
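+        # After a stop, a plain up() falls back to the scale declared in the
+        # config file.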
+ project.stop()
+ project.up()
+ assert len(project.containers()) == 3
+
+ @v2_only()
+ def test_initialize_volumes(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={vol_name: {}},
+ )
+
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.volumes.initialize()
+
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ assert volume_data['Driver'] == 'local'
+
+ @v2_only()
+ def test_project_up_implicit_volume_driver(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={vol_name: {}},
+ )
+
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.up()
+
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ self.assertEqual(volume_data['Driver'], 'local')
+
+ @v3_only()
+ def test_project_up_with_secrets(self):
+ node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default'))
+
+ config_data = build_config(
+ version=V3_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'cat /run/secrets/special',
+ 'secrets': [
+ types.ServiceSecret.parse({'source': 'super', 'target': 'special'}),
+ ],
+ 'environment': ['constraint:node=={}'.format(node if node is not None else '*')]
+ }],
+ secrets={
+ 'super': {
+ 'file': os.path.abspath('tests/fixtures/secrets/default'),
+ },
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+ project.stop()
+
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ container, = containers
+
+ output = container.logs()
+ assert output == b"This is the secret\n"
+
+ @v2_only()
+ def test_initialize_volumes_invalid_volume_driver(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={vol_name: {'driver': 'foobar'}},
+ )
+
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ with self.assertRaises(APIError if is_cluster(self.client) else config.ConfigurationError):
+ project.volumes.initialize()
+
+ @v2_only()
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_initialize_volumes_updated_driver(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={vol_name: {'driver': 'local'}},
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.volumes.initialize()
+
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ self.assertEqual(volume_data['Driver'], 'local')
+
+ config_data = config_data._replace(
+ volumes={vol_name: {'driver': 'smb'}}
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data,
+ client=self.client
+ )
+ with self.assertRaises(config.ConfigurationError) as e:
+ project.volumes.initialize()
+ assert 'Configuration for volume {0} specifies driver smb'.format(
+ vol_name
+ ) in str(e.exception)
+
+ @v2_only()
+ def test_initialize_volumes_updated_blank_driver(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={vol_name: {'driver': 'local'}},
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.volumes.initialize()
+
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ self.assertEqual(volume_data['Driver'], 'local')
+
+ config_data = config_data._replace(
+ volumes={vol_name: {}}
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data,
+ client=self.client
+ )
+ project.volumes.initialize()
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ self.assertEqual(volume_data['Driver'], 'local')
+
+ @v2_only()
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_initialize_volumes_external_volumes(self):
+ # Use composetest_ prefix so it gets garbage-collected in tearDown()
+ vol_name = 'composetest_{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+ self.client.create_volume(vol_name)
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={
+ vol_name: {'external': True, 'name': vol_name}
+ },
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.volumes.initialize()
+
+ with self.assertRaises(NotFound):
+ self.client.inspect_volume(full_vol_name)
+
+ @v2_only()
+ def test_initialize_volumes_inexistent_external_volume(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={
+ vol_name: {'external': True, 'name': vol_name}
+ },
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ with self.assertRaises(config.ConfigurationError) as e:
+ project.volumes.initialize()
+ assert 'Volume {0} declared as external'.format(
+ vol_name
+ ) in str(e.exception)
+
+ @v2_only()
+ def test_project_up_named_volumes_in_binds(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {
+ 'version': str(V2_0),
+ 'services': {
+ 'simple': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'volumes': ['{0}:/data'.format(vol_name)]
+ },
+ },
+ 'volumes': {
+ vol_name: {'driver': 'local'}
+ }
+
+ })
+ config_details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(config_details)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
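+        # The bare volume name in the bind must be namespaced to the project
+        # (composetest_<name>) rather than created verbatim.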
+ service = project.services[0]
+ self.assertEqual(service.name, 'simple')
+ volumes = service.options.get('volumes')
+ self.assertEqual(len(volumes), 1)
+ self.assertEqual(volumes[0].external, full_vol_name)
+ project.up()
+ engine_volumes = self.client.volumes()['Volumes']
+ container = service.get_container()
+ assert [mount['Name'] for mount in container.get('Mounts')] == [full_vol_name]
+ assert next((v for v in engine_volumes if v['Name'] == vol_name), None) is None
+
+ def test_project_up_orphans(self):
+ config_dict = {
+ 'service1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ }
+ }
+
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
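+        # Rename service1 to service2; the old service1 container becomes an
+        # orphan that up() should warn about.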
+ config_dict['service2'] = config_dict['service1']
+ del config_dict['service1']
+
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ with mock.patch('compose.project.log') as mock_log:
+ project.up()
+
+ mock_log.warning.assert_called_once_with(mock.ANY)
+
+ assert len([
+ ctnr for ctnr in project._labeled_containers()
+ if ctnr.labels.get(LABEL_SERVICE) == 'service1'
+ ]) == 1
+
+ project.up(remove_orphans=True)
+
+ assert len([
+ ctnr for ctnr in project._labeled_containers()
+ if ctnr.labels.get(LABEL_SERVICE) == 'service1'
+ ]) == 0
+
+ @v2_1_only()
+ def test_project_up_healthy_dependency(self):
+ config_dict = {
+ 'version': '2.1',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'healthcheck': {
+ 'test': 'exit 0',
+ 'retries': 1,
+ 'timeout': '10s',
+ 'interval': '1s'
+ },
+ },
+ 'svc2': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'depends_on': {
+ 'svc1': {'condition': 'service_healthy'},
+ }
+ }
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 2
+
+ svc1 = project.get_service('svc1')
+ svc2 = project.get_service('svc2')
+ assert 'svc1' in svc2.get_dependency_names()
+ assert svc1.is_healthy()
+
+ @v2_1_only()
+ def test_project_up_unhealthy_dependency(self):
+ config_dict = {
+ 'version': '2.1',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'healthcheck': {
+ 'test': 'exit 1',
+ 'retries': 1,
+ 'timeout': '10s',
+ 'interval': '1s'
+ },
+ },
+ 'svc2': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'depends_on': {
+ 'svc1': {'condition': 'service_healthy'},
+ }
+ }
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ with pytest.raises(ProjectError):
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 1
+
+ svc1 = project.get_service('svc1')
+ svc2 = project.get_service('svc2')
+ assert 'svc1' in svc2.get_dependency_names()
+ with pytest.raises(HealthCheckFailed):
+ svc1.is_healthy()
+
+ @v2_1_only()
+ def test_project_up_no_healthcheck_dependency(self):
+ config_dict = {
+ 'version': '2.1',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'healthcheck': {
+ 'disable': True
+ },
+ },
+ 'svc2': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'depends_on': {
+ 'svc1': {'condition': 'service_healthy'},
+ }
+ }
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ with pytest.raises(ProjectError):
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 1
+
+ svc1 = project.get_service('svc1')
+ svc2 = project.get_service('svc2')
+ assert 'svc1' in svc2.get_dependency_names()
+ with pytest.raises(NoHealthCheckConfigured):
+ svc1.is_healthy()
diff --git a/tests/integration/resilience_test.py b/tests/integration/resilience_test.py
new file mode 100644
index 00000000..2a2d1b56
--- /dev/null
+++ b/tests/integration/resilience_test.py
@@ -0,0 +1,57 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .. import mock
+from .testcases import DockerClientTestCase
+from compose.config.types import VolumeSpec
+from compose.project import Project
+from compose.service import ConvergenceStrategy
+
+
+class ResilienceTest(DockerClientTestCase):
+ def setUp(self):
+ self.db = self.create_service(
+ 'db',
+ volumes=[VolumeSpec.parse('/var/db')],
+ command='top')
+ self.project = Project('composetest', [self.db], self.client)
+
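+        # Start one container and record its volume's host path so the tests
+        # can verify it survives recreation.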
+ container = self.db.create_container()
+ self.db.start_container(container)
+ self.host_path = container.get_mount('/var/db')['Source']
+
+ def tearDown(self):
+ del self.project
+ del self.db
+ super(ResilienceTest, self).tearDown()
+
+ def test_successful_recreate(self):
+ self.project.up(strategy=ConvergenceStrategy.always)
+ container = self.db.containers()[0]
+ self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+
+ def test_create_failure(self):
+ with mock.patch('compose.service.Service.create_container', crash):
+ with self.assertRaises(Crash):
+ self.project.up(strategy=ConvergenceStrategy.always)
+
+ self.project.up()
+ container = self.db.containers()[0]
+ self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+
+ def test_start_failure(self):
+ with mock.patch('compose.service.Service.start_container', crash):
+ with self.assertRaises(Crash):
+ self.project.up(strategy=ConvergenceStrategy.always)
+
+ self.project.up()
+ container = self.db.containers()[0]
+ self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+
+
+class Crash(Exception):
+ pass
+
+
+def crash(*args, **kwargs):
+ raise Crash()
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
new file mode 100644
index 00000000..3ddf991b
--- /dev/null
+++ b/tests/integration/service_test.py
@@ -0,0 +1,1380 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import shutil
+import tempfile
+from distutils.spawn import find_executable
+from os import path
+
+import pytest
+from docker.errors import APIError
+from six import StringIO
+from six import text_type
+
+from .. import mock
+from .testcases import DockerClientTestCase
+from .testcases import get_links
+from .testcases import pull_busybox
+from .testcases import SWARM_SKIP_CONTAINERS_ALL
+from .testcases import SWARM_SKIP_CPU_SHARES
+from compose import __version__
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.const import LABEL_CONFIG_HASH
+from compose.const import LABEL_CONTAINER_NUMBER
+from compose.const import LABEL_ONE_OFF
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_SERVICE
+from compose.const import LABEL_VERSION
+from compose.container import Container
+from compose.errors import OperationFailedError
+from compose.project import OneOffFilter
+from compose.service import ConvergencePlan
+from compose.service import ConvergenceStrategy
+from compose.service import NetworkMode
+from compose.service import PidMode
+from compose.service import Service
+from compose.utils import parse_nanoseconds_int
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_3_only
+from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+
+def create_and_start_container(service, **override_options):
+ container = service.create_container(**override_options)
+ return service.start_container(container)
+
+
+class ServiceTest(DockerClientTestCase):
+
+ def test_containers(self):
+ foo = self.create_service('foo')
+ bar = self.create_service('bar')
+
+ create_and_start_container(foo)
+
+ self.assertEqual(len(foo.containers()), 1)
+ self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
+ self.assertEqual(len(bar.containers()), 0)
+
+ create_and_start_container(bar)
+ create_and_start_container(bar)
+
+ self.assertEqual(len(foo.containers()), 1)
+ self.assertEqual(len(bar.containers()), 2)
+
+ names = [c.name for c in bar.containers()]
+ self.assertIn('composetest_bar_1', names)
+ self.assertIn('composetest_bar_2', names)
+
+ def test_containers_one_off(self):
+ db = self.create_service('db')
+ container = db.create_container(one_off=True)
+ self.assertEqual(db.containers(stopped=True), [])
+ self.assertEqual(db.containers(one_off=OneOffFilter.only, stopped=True), [container])
+
+ def test_project_is_added_to_container_name(self):
+ service = self.create_service('web')
+ create_and_start_container(service)
+ self.assertEqual(service.containers()[0].name, 'composetest_web_1')
+
+ def test_create_container_with_one_off(self):
+ db = self.create_service('db')
+ container = db.create_container(one_off=True)
+ self.assertEqual(container.name, 'composetest_db_run_1')
+
+ def test_create_container_with_one_off_when_existing_container_is_running(self):
+ db = self.create_service('db')
+ db.start()
+ container = db.create_container(one_off=True)
+ self.assertEqual(container.name, 'composetest_db_run_1')
+
+ def test_create_container_with_unspecified_volume(self):
+ service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get_mount('/var/db')
+
+ def test_create_container_with_volume_driver(self):
+ service = self.create_service('db', volume_driver='foodriver')
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual('foodriver', container.get('HostConfig.VolumeDriver'))
+
+ @pytest.mark.skipif(SWARM_SKIP_CPU_SHARES, reason='Swarm --cpu-shares bug')
+ def test_create_container_with_cpu_shares(self):
+ service = self.create_service('db', cpu_shares=73)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuShares'), 73)
+
+ def test_create_container_with_cpu_quota(self):
+ service = self.create_service('db', cpu_quota=40000)
+ container = service.create_container()
+ container.start()
+ self.assertEqual(container.get('HostConfig.CpuQuota'), 40000)
+
+ @v2_2_only()
+ def test_create_container_with_cpu_count(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpu_count=2)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuCount'), 2)
+
+ @v2_2_only()
+    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='cpu_percent is not supported on Linux')
+ def test_create_container_with_cpu_percent(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpu_percent=12)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuPercent'), 12)
+
+ @v2_2_only()
+ def test_create_container_with_cpus(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpus=1)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.NanoCpus'), 1000000000)
+
+ def test_create_container_with_shm_size(self):
+ self.require_api_version('1.22')
+ service = self.create_service('db', shm_size=67108864)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
+
+ def test_create_container_with_init_bool(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', init=True)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.Init') is True
+
+ @pytest.mark.xfail(True, reason='Option has been removed in Engine 17.06.0')
+ def test_create_container_with_init_path(self):
+ self.require_api_version('1.25')
+ docker_init_path = find_executable('docker-init')
+ service = self.create_service('db', init=docker_init_path)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.InitPath') == docker_init_path
+
+ @pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
+ def test_create_container_with_pids_limit(self):
+ self.require_api_version('1.23')
+ service = self.create_service('db', pids_limit=10)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.PidsLimit') == 10
+
+ def test_create_container_with_extra_hosts_list(self):
+ extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
+ service = self.create_service('db', extra_hosts=extra_hosts)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
+
+ def test_create_container_with_extra_hosts_dicts(self):
+ extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
+ extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
+ service = self.create_service('db', extra_hosts=extra_hosts)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
+
+ def test_create_container_with_cpu_set(self):
+ service = self.create_service('db', cpuset='0')
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
+
+ def test_create_container_with_read_only_root_fs(self):
+ read_only = True
+ service = self.create_service('db', read_only=read_only)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.ReadonlyRootfs') == read_only
+
+ def test_create_container_with_blkio_config(self):
+ blkio_config = {
+ 'weight': 300,
+ 'weight_device': [{'path': '/dev/sda', 'weight': 200}],
+ 'device_read_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024 * 100}],
+ 'device_read_iops': [{'path': '/dev/sda', 'rate': 1000}],
+ 'device_write_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024}],
+ 'device_write_iops': [{'path': '/dev/sda', 'rate': 800}]
+ }
+ service = self.create_service('web', blkio_config=blkio_config)
+ container = service.create_container()
+ assert container.get('HostConfig.BlkioWeight') == 300
+ assert container.get('HostConfig.BlkioWeightDevice') == [{
+ 'Path': '/dev/sda', 'Weight': 200
+ }]
+ assert container.get('HostConfig.BlkioDeviceReadBps') == [{
+ 'Path': '/dev/sda', 'Rate': 1024 * 1024 * 100
+ }]
+ assert container.get('HostConfig.BlkioDeviceWriteBps') == [{
+ 'Path': '/dev/sda', 'Rate': 1024 * 1024
+ }]
+ assert container.get('HostConfig.BlkioDeviceReadIOps') == [{
+ 'Path': '/dev/sda', 'Rate': 1000
+ }]
+ assert container.get('HostConfig.BlkioDeviceWriteIOps') == [{
+ 'Path': '/dev/sda', 'Rate': 800
+ }]
+
+ def test_create_container_with_security_opt(self):
+ security_opt = ['label:disable']
+ service = self.create_service('db', security_opt=security_opt)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
+
+ # @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ @pytest.mark.skipif(True, reason='https://github.com/moby/moby/issues/34270')
+ def test_create_container_with_storage_opt(self):
+ storage_opt = {'size': '1G'}
+ service = self.create_service('db', storage_opt=storage_opt)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.StorageOpt'), storage_opt)
+
+ def test_create_container_with_mac_address(self):
+ service = self.create_service('db', mac_address='02:42:ac:11:65:43')
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
+
+ def test_create_container_with_specified_volume(self):
+ host_path = '/tmp/host-path'
+ container_path = '/container-path'
+
+ service = self.create_service(
+ 'db',
+ volumes=[VolumeSpec(host_path, container_path, 'rw')])
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get_mount(container_path)
+
+ # Match the last component ("host-path"), because boot2docker symlinks /tmp
+ actual_host_path = container.get_mount(container_path)['Source']
+
+ self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
+ msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
+
+ def test_create_container_with_healthcheck_config(self):
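+        # The engine expects healthcheck durations in nanoseconds.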
+ one_second = parse_nanoseconds_int('1s')
+ healthcheck = {
+ 'test': ['true'],
+ 'interval': 2 * one_second,
+ 'timeout': 5 * one_second,
+ 'retries': 5,
+ 'start_period': 2 * one_second
+ }
+ service = self.create_service('db', healthcheck=healthcheck)
+ container = service.create_container()
+ remote_healthcheck = container.get('Config.Healthcheck')
+ assert remote_healthcheck['Test'] == healthcheck['test']
+ assert remote_healthcheck['Interval'] == healthcheck['interval']
+ assert remote_healthcheck['Timeout'] == healthcheck['timeout']
+ assert remote_healthcheck['Retries'] == healthcheck['retries']
+ assert remote_healthcheck['StartPeriod'] == healthcheck['start_period']
+
+ def test_recreate_preserves_volume_with_trailing_slash(self):
+ """When the Compose file specifies a trailing slash in the container path, make
+ sure we copy the volume over when recreating.
+ """
+ service = self.create_service('data', volumes=[VolumeSpec.parse('/data/')])
+ old_container = create_and_start_container(service)
+ volume_path = old_container.get_mount('/data')['Source']
+
+ new_container = service.recreate_container(old_container)
+ self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
+ def test_duplicate_volume_trailing_slash(self):
+ """
+ When an image specifies a volume, and the Compose file specifies a host path
+ but adds a trailing slash, make sure that we don't create duplicate binds.
+ """
+ host_path = '/tmp/data'
+ container_path = '/data'
+ volumes = [VolumeSpec.parse('{}:{}/'.format(host_path, container_path))]
+
+ tmp_container = self.client.create_container(
+ 'busybox', 'true',
+ volumes={container_path: {}},
+ labels={'com.docker.compose.test_image': 'true'},
+ host_config={}
+ )
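+        # Commit the throwaway container to get an image that declares /data
+        # as a volume.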
+ image = self.client.commit(tmp_container)['Id']
+
+ service = self.create_service('db', image=image, volumes=volumes)
+ old_container = create_and_start_container(service)
+
+ self.assertEqual(
+ old_container.get('Config.Volumes'),
+ {container_path: {}},
+ )
+
+ service = self.create_service('db', image=image, volumes=volumes)
+ new_container = service.recreate_container(old_container)
+
+ self.assertEqual(
+ new_container.get('Config.Volumes'),
+ {container_path: {}},
+ )
+
+ self.assertEqual(service.containers(stopped=False), [new_container])
+
+ def test_create_container_with_volumes_from(self):
+ volume_service = self.create_service('data')
+ volume_container_1 = volume_service.create_container()
+ volume_container_2 = Container.create(
+ self.client,
+ image='busybox:latest',
+ command=["top"],
+ labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ environment=['affinity:container=={}'.format(volume_container_1.id)],
+ )
+ host_service = self.create_service(
+ 'host',
+ volumes_from=[
+ VolumeFromSpec(volume_service, 'rw', 'service'),
+ VolumeFromSpec(volume_container_2, 'rw', 'container')
+ ],
+ environment=['affinity:container=={}'.format(volume_container_1.id)],
+ )
+ host_container = host_service.create_container()
+ host_service.start_container(host_container)
+ self.assertIn(volume_container_1.id + ':rw',
+ host_container.get('HostConfig.VolumesFrom'))
+ self.assertIn(volume_container_2.id + ':rw',
+ host_container.get('HostConfig.VolumesFrom'))
+
+ def test_execute_convergence_plan_recreate(self):
+ service = self.create_service(
+ 'db',
+ environment={'FOO': '1'},
+ volumes=[VolumeSpec.parse('/etc')],
+ entrypoint=['top'],
+ command=['-d', '1']
+ )
+ old_container = service.create_container()
+ self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
+ self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
+ self.assertIn('FOO=1', old_container.get('Config.Env'))
+ self.assertEqual(old_container.name, 'composetest_db_1')
+ service.start_container(old_container)
+ old_container.inspect() # reload volume data
+ volume_path = old_container.get_mount('/etc')['Source']
+
+ num_containers_before = len(self.client.containers(all=True))
+
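+        # Change the config so we can verify the recreated container picks up
+        # the new value.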
+ service.options['environment']['FOO'] = '2'
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
+ self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
+ self.assertIn('FOO=2', new_container.get('Config.Env'))
+ self.assertEqual(new_container.name, 'composetest_db_1')
+ self.assertEqual(new_container.get_mount('/etc')['Source'], volume_path)
+ if not is_cluster(self.client):
+ assert (
+ 'affinity:container==%s' % old_container.id in
+ new_container.get('Config.Env')
+ )
+ else:
+ # In Swarm, the env marker is consumed and the container should be deployed
+ # on the same node.
+ assert old_container.get('Node.Name') == new_container.get('Node.Name')
+
+ self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
+ self.assertNotEqual(old_container.id, new_container.id)
+ self.assertRaises(APIError,
+ self.client.inspect_container,
+ old_container.id)
+
+ def test_execute_convergence_plan_recreate_twice(self):
+ service = self.create_service(
+ 'db',
+ volumes=[VolumeSpec.parse('/etc')],
+ entrypoint=['top'],
+ command=['-d', '1'])
+
+ orig_container = service.create_container()
+ service.start_container(orig_container)
+
+ orig_container.inspect() # reload volume data
+ volume_path = orig_container.get_mount('/etc')['Source']
+
+ # Recreate twice to reproduce the bug: the volume must survive both recreations
+ for _ in range(2):
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [orig_container]))
+
+ assert new_container.get_mount('/etc')['Source'] == volume_path
+ if not is_cluster(self.client):
+ assert ('affinity:container==%s' % orig_container.id in
+ new_container.get('Config.Env'))
+ else:
+ # In Swarm, the env marker is consumed and the container should be deployed
+ # on the same node.
+ assert orig_container.get('Node.Name') == new_container.get('Node.Name')
+
+ orig_container = new_container
+
+ def test_execute_convergence_plan_when_containers_are_stopped(self):
+ service = self.create_service(
+ 'db',
+ environment={'FOO': '1'},
+ volumes=[VolumeSpec.parse('/var/db')],
+ entrypoint=['top'],
+ command=['-d', '1']
+ )
+ service.create_container()
+
+ containers = service.containers(stopped=True)
+ self.assertEqual(len(containers), 1)
+ container, = containers
+ self.assertFalse(container.is_running)
+
+ service.execute_convergence_plan(ConvergencePlan('start', [container]))
+
+ containers = service.containers()
+ self.assertEqual(len(containers), 1)
+ container.inspect()
+ self.assertEqual(container, containers[0])
+ self.assertTrue(container.is_running)
+
+ def test_execute_convergence_plan_with_image_declared_volume(self):
+ service = Service(
+ project='composetest',
+ name='db',
+ client=self.client,
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ )
+
+ old_container = create_and_start_container(service)
+ self.assertEqual(
+ [mount['Destination'] for mount in old_container.get('Mounts')], ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ self.assertEqual(
+ [mount['Destination'] for mount in new_container.get('Mounts')],
+ ['/data']
+ )
+ self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
+ def test_execute_convergence_plan_when_image_volume_masks_config(self):
+ service = self.create_service(
+ 'db',
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ )
+
+ old_container = create_and_start_container(service)
+ self.assertEqual(
+ [mount['Destination'] for mount in old_container.get('Mounts')],
+ ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')]
+
+ with mock.patch('compose.service.log') as mock_log:
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warn.mock_calls[0]
+ self.assertIn(
+ "Service \"db\" is using volume \"/data\" from the previous container",
+ args[0])
+
+ self.assertEqual(
+ [mount['Destination'] for mount in new_container.get('Mounts')],
+ ['/data']
+ )
+ self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
+ def test_execute_convergence_plan_when_host_volume_is_removed(self):
+ host_path = '/tmp/host-path'
+ service = self.create_service(
+ 'db',
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ volumes=[VolumeSpec(host_path, '/data', 'rw')])
+
+ old_container = create_and_start_container(service)
+ assert (
+ [mount['Destination'] for mount in old_container.get('Mounts')] ==
+ ['/data']
+ )
+ service.options['volumes'] = []
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]))
+
+ assert not mock_log.warn.called
+ assert (
+ [mount['Destination'] for mount in new_container.get('Mounts')] ==
+ ['/data']
+ )
+ assert new_container.get_mount('/data')['Source'] != host_path
+
+ def test_execute_convergence_plan_without_start(self):
+ service = self.create_service(
+ 'db',
+ build={'context': 'tests/fixtures/dockerfile-with-volume'}
+ )
+
+ containers = service.execute_convergence_plan(ConvergencePlan('create', []), start=False)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ containers = service.execute_convergence_plan(
+ ConvergencePlan('recreate', containers),
+ start=False)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ service.execute_convergence_plan(ConvergencePlan('start', containers), start=False)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
+
+ def test_start_container_passes_through_options(self):
+ db = self.create_service('db')
+ create_and_start_container(db, environment={'FOO': 'BAR'})
+ self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+
+ def test_start_container_inherits_options_from_constructor(self):
+ db = self.create_service('db', environment={'FOO': 'BAR'})
+ create_and_start_container(db)
+ self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_container_creates_links(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, None)])
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+ create_and_start_container(web)
+
+ self.assertEqual(
+ set(get_links(web.containers()[0])),
+ set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'db'])
+ )
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_container_creates_links_with_names(self):
+ db = self.create_service('db')
+ web = self.create_service('web', links=[(db, 'custom_link_name')])
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+ create_and_start_container(web)
+
+ self.assertEqual(
+ set(get_links(web.containers()[0])),
+ set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'custom_link_name'])
+ )
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_container_with_external_links(self):
+ db = self.create_service('db')
+ web = self.create_service('web', external_links=['composetest_db_1',
+ 'composetest_db_2',
+ 'composetest_db_3:db_3'])
+
+ for _ in range(3):
+ create_and_start_container(db)
+ create_and_start_container(web)
+
+ self.assertEqual(
+ set(get_links(web.containers()[0])),
+ set([
+ 'composetest_db_1',
+ 'composetest_db_2',
+ 'db_3']),
+ )
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_normal_container_does_not_create_links_to_its_own_service(self):
+ db = self.create_service('db')
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+
+ c = create_and_start_container(db)
+ self.assertEqual(set(get_links(c)), set([]))
+
+ @no_cluster('No legacy links support in Swarm')
+ def test_start_one_off_container_creates_links_to_its_own_service(self):
+ db = self.create_service('db')
+
+ create_and_start_container(db)
+ create_and_start_container(db)
+
+ c = create_and_start_container(db, one_off=OneOffFilter.only)
+
+ self.assertEqual(
+ set(get_links(c)),
+ set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'db'])
+ )
+
+ def test_start_container_builds_images(self):
+ service = Service(
+ name='test',
+ client=self.client,
+ build={'context': 'tests/fixtures/simple-dockerfile'},
+ project='composetest',
+ )
+ container = create_and_start_container(service)
+ container.wait()
+ self.assertIn(b'success', container.logs())
+ assert len(self.client.images(name='composetest_test')) >= 1
+
+ def test_start_container_uses_tagged_image_if_it_exists(self):
+ self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
+ service = Service(
+ name='test',
+ client=self.client,
+ build={'context': 'this/does/not/exist/and/will/throw/error'},
+ project='composetest',
+ )
+ container = create_and_start_container(service)
+ container.wait()
+ self.assertIn(b'success', container.logs())
+
+ def test_start_container_creates_ports(self):
+ service = self.create_service('web', ports=[8000])
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
+ self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
+
+ def test_build(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ service = self.create_service('web', build={'context': base_dir})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert self.client.inspect_image('composetest_web')
+
+ def test_build_non_ascii_filename(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f:
+ f.write("hello world\n")
+
+ service = self.create_service('web', build={'context': text_type(base_dir)})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+ assert self.client.inspect_image('composetest_web')
+
+ def test_build_with_image_name(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ image_name = 'examples/composetest:latest'
+ self.addCleanup(self.client.remove_image, image_name)
+ self.create_service('web', build={'context': base_dir}, image=image_name).build()
+ assert self.client.inspect_image(image_name)
+
+ def test_build_with_git_url(self):
+ build_url = "https://github.com/dnephin/docker-build-from-url.git"
+ service = self.create_service('buildwithurl', build={'context': build_url})
+ self.addCleanup(self.client.remove_image, service.image_name)
+ service.build()
+ assert service.image()
+
+ def test_build_with_build_args(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+ f.write("ARG build_version\n")
+ f.write("RUN echo ${build_version}\n")
+
+ service = self.create_service('buildwithargs',
+ build={'context': text_type(base_dir),
+ 'args': {"build_version": "1"}})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+ assert service.image()
+ assert "build_version=1" in service.image()['ContainerConfig']['Cmd']
+
+ def test_build_with_build_args_override(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+ f.write("ARG build_version\n")
+ f.write("RUN echo ${build_version}\n")
+
+ service = self.create_service('buildwithargs',
+ build={'context': text_type(base_dir),
+ 'args': {"build_version": "1"}})
+ service.build(build_args_override={'build_version': '2'})
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+ assert "build_version=2" in service.image()['ContainerConfig']['Cmd']
+
+ def test_build_with_build_labels(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+
+ service = self.create_service('buildlabels', build={
+ 'context': text_type(base_dir),
+ 'labels': {'com.docker.compose.test': 'true'}
+ })
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+ assert service.image()['Config']['Labels']['com.docker.compose.test'] == 'true'
+
+ @no_cluster('Container networks not on Swarm')
+ def test_build_with_network(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ f.write('RUN ping -c1 google.local\n')
+
+ net_container = self.client.create_container(
+ 'busybox', 'top', host_config=self.client.create_host_config(
+ extra_hosts={'google.local': '127.0.0.1'}
+ ), name='composetest_build_network'
+ )
+
+ self.addCleanup(self.client.remove_container, net_container, force=True)
+ self.client.start(net_container)
+
+ service = self.create_service('buildwithnet', build={
+ 'context': text_type(base_dir),
+ 'network': 'container:{}'.format(net_container['Id'])
+ })
+
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+
+ @v2_3_only()
+ @no_cluster('Not supported on UCP 2.2.0-beta1') # FIXME: remove once support is added
+ def test_build_with_target(self):
+ self.require_api_version('1.30')
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox as one\n')
+ f.write('LABEL com.docker.compose.test=true\n')
+ f.write('LABEL com.docker.compose.test.target=one\n')
+ f.write('FROM busybox as two\n')
+ f.write('LABEL com.docker.compose.test.target=two\n')
+
+ service = self.create_service('buildtarget', build={
+ 'context': text_type(base_dir),
+ 'target': 'one'
+ })
+
+ service.build()
+ assert service.image()
+ assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one'
+
+ def test_start_container_stays_unprivileged(self):
+ service = self.create_service('web')
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(container['HostConfig']['Privileged'], False)
+
+ def test_start_container_becomes_privileged(self):
+ service = self.create_service('web', privileged=True)
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(container['HostConfig']['Privileged'], True)
+
+ def test_expose_does_not_publish_ports(self):
+ service = self.create_service('web', expose=["8000"])
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})
+
+ def test_start_container_creates_port_with_explicit_protocol(self):
+ service = self.create_service('web', ports=['8000/udp'])
+ container = create_and_start_container(service).inspect()
+ self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])
+
+ def test_start_container_creates_fixed_external_ports(self):
+ service = self.create_service('web', ports=['8000:8000'])
+ container = create_and_start_container(service).inspect()
+ self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
+ self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
+
+ def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
+ service = self.create_service('web', ports=['8001:8000'])
+ container = create_and_start_container(service).inspect()
+ self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
+ self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')
+
+ def test_port_with_explicit_interface(self):
+ service = self.create_service('web', ports=[
+ '127.0.0.1:8001:8000',
+ '0.0.0.0:9001:9000/udp',
+ ])
+ container = create_and_start_container(service).inspect()
+ assert container['NetworkSettings']['Ports']['8000/tcp'] == [{
+ 'HostIp': '127.0.0.1',
+ 'HostPort': '8001',
+ }]
+ assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostPort'] == '9001'
+ if not is_cluster(self.client):
+ assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostIp'] == '0.0.0.0'
+ # On a single-node engine the full port mapping would be:
+ # {'8000/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '8001'}],
+ # '9000/udp': [{'HostIp': '0.0.0.0', 'HostPort': '9001'}]}
+ # but the HostIp reported for a 0.0.0.0 binding is not stable on a
+ # cluster, so only the cluster-safe subset is asserted above.
+
+ def test_create_with_image_id(self):
+ # Get image id for the current busybox:latest
+ pull_busybox(self.client)
+ image_id = self.client.inspect_image('busybox:latest')['Id'][:12]
+ service = self.create_service('foo', image=image_id)
+ service.create_container()
+
+ def test_scale(self):
+ service = self.create_service('web')
+ service.scale(1)
+ self.assertEqual(len(service.containers()), 1)
+
+ # Ensure containers don't have stdin, stdout, or stderr attached
+ container = service.containers()[0]
+ config = container.inspect()['Config']
+ self.assertFalse(config['AttachStderr'])
+ self.assertFalse(config['AttachStdout'])
+ self.assertFalse(config['AttachStdin'])
+
+ service.scale(3)
+ self.assertEqual(len(service.containers()), 3)
+ service.scale(1)
+ self.assertEqual(len(service.containers()), 1)
+ service.scale(0)
+ self.assertEqual(len(service.containers()), 0)
+
+ @pytest.mark.skipif(
+ SWARM_SKIP_CONTAINERS_ALL,
+ reason='Swarm /containers/json bug'
+ )
+ def test_scale_with_stopped_containers(self):
+ """
+ Given there are some stopped containers and scale is called with a
+ desired number that is the same as the number of stopped containers,
+ test that those containers are restarted and not removed/recreated.
+ """
+ service = self.create_service('web')
+ next_number = service._next_container_number()
+ valid_numbers = [next_number, next_number + 1]
+ service.create_container(number=next_number)
+ service.create_container(number=next_number + 1)
+
+ with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
+ service.scale(2)
+ for container in service.containers():
+ self.assertTrue(container.is_running)
+ self.assertTrue(container.number in valid_numbers)
+
+ captured_output = mock_stderr.getvalue()
+ self.assertNotIn('Creating', captured_output)
+ self.assertIn('Starting', captured_output)
+
+ def test_scale_with_stopped_containers_and_needing_creation(self):
+ """
+ Given there are some stopped containers and scale is called with a
+ desired number that is greater than the number of stopped containers,
+ test that those containers are restarted and the required number of new
+ containers is created.
+ """
+ service = self.create_service('web')
+ next_number = service._next_container_number()
+ service.create_container(number=next_number, quiet=True)
+
+ for container in service.containers():
+ self.assertFalse(container.is_running)
+
+ with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
+ service.scale(2)
+
+ self.assertEqual(len(service.containers()), 2)
+ for container in service.containers():
+ self.assertTrue(container.is_running)
+
+ captured_output = mock_stderr.getvalue()
+ self.assertIn('Creating', captured_output)
+ self.assertIn('Starting', captured_output)
+
+ def test_scale_with_api_error(self):
+ """Test that an APIError raised while scaling is handled and the
+ remaining threads continue.
+ """
+ service = self.create_service('web')
+ next_number = service._next_container_number()
+ service.create_container(number=next_number, quiet=True)
+
+ with mock.patch(
+ 'compose.container.Container.create',
+ side_effect=APIError(
+ message="testing",
+ response={},
+ explanation="Boom")):
+ with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
+ with pytest.raises(OperationFailedError):
+ service.scale(3)
+
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
+ assert (
+ "ERROR: for composetest_web_2 Cannot create container for service"
+ " web: Boom" in mock_stderr.getvalue()
+ )
+
+ def test_scale_with_unexpected_exception(self):
+ """Test that an error raised while scaling that is not an APIError is
+ re-raised.
+ """
+ service = self.create_service('web')
+ next_number = service._next_container_number()
+ service.create_container(number=next_number, quiet=True)
+
+ with mock.patch(
+ 'compose.container.Container.create',
+ side_effect=ValueError("BOOM")
+ ):
+ with self.assertRaises(ValueError):
+ service.scale(3)
+
+ self.assertEqual(len(service.containers()), 1)
+ self.assertTrue(service.containers()[0].is_running)
+
+ @mock.patch('compose.service.log')
+ def test_scale_with_desired_number_already_achieved(self, mock_log):
+ """
+ Test that calling scale with a desired number that is equal to the
+ number of containers already running results in no change.
+ """
+ service = self.create_service('web')
+ next_number = service._next_container_number()
+ container = service.create_container(number=next_number, quiet=True)
+ container.start()
+
+ container.inspect()
+ assert container.is_running
+ assert len(service.containers()) == 1
+
+ service.scale(1)
+ assert len(service.containers()) == 1
+ container.inspect()
+ assert container.is_running
+
+ captured_output = mock_log.info.call_args[0]
+ assert 'Desired container number already achieved' in captured_output
+
+ @mock.patch('compose.service.log')
+ def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
+ """Test that calling scale on a service that has a custom container name
+ results in warning output.
+ """
+ service = self.create_service('app', container_name='custom-container')
+ self.assertEqual(service.custom_container_name, 'custom-container')
+
+ with pytest.raises(OperationFailedError):
+ service.scale(3)
+
+ captured_output = mock_log.warn.call_args[0][0]
+
+ self.assertEqual(len(service.containers()), 1)
+ self.assertIn(
+ "Remove the custom name to scale the service.",
+ captured_output
+ )
+
+ def test_scale_sets_ports(self):
+ service = self.create_service('web', ports=['8000'])
+ service.scale(2)
+ containers = service.containers()
+ self.assertEqual(len(containers), 2)
+ for container in containers:
+ self.assertEqual(
+ list(container.get('HostConfig.PortBindings')),
+ ['8000/tcp'])
+
+ def test_scale_with_immediate_exit(self):
+ service = self.create_service('web', image='busybox', command='true')
+ service.scale(2)
+ assert len(service.containers(stopped=True)) == 2
+
+ def test_network_mode_none(self):
+ service = self.create_service('web', network_mode=NetworkMode('none'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')
+
+ def test_network_mode_bridged(self):
+ service = self.create_service('web', network_mode=NetworkMode('bridge'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')
+
+ def test_network_mode_host(self):
+ service = self.create_service('web', network_mode=NetworkMode('host'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
+
+ def test_pid_mode_none_defined(self):
+ service = self.create_service('web', pid_mode=None)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.PidMode'), '')
+
+ def test_pid_mode_host(self):
+ service = self.create_service('web', pid_mode=PidMode('host'))
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.PidMode'), 'host')
+
+ @v2_1_only()
+ def test_userns_mode_none_defined(self):
+ service = self.create_service('web', userns_mode=None)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.UsernsMode'), '')
+
+ @v2_1_only()
+ def test_userns_mode_host(self):
+ service = self.create_service('web', userns_mode='host')
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.UsernsMode'), 'host')
+
+ def test_dns_no_value(self):
+ service = self.create_service('web')
+ container = create_and_start_container(service)
+ self.assertIsNone(container.get('HostConfig.Dns'))
+
+ def test_dns_list(self):
+ service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
+
+ def test_mem_swappiness(self):
+ service = self.create_service('web', mem_swappiness=11)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.MemorySwappiness'), 11)
+
+ def test_mem_reservation(self):
+ service = self.create_service('web', mem_reservation='20m')
+ container = create_and_start_container(service)
+ assert container.get('HostConfig.MemoryReservation') == 20 * 1024 * 1024
+
+ def test_restart_always_value(self):
+ service = self.create_service('web', restart={'Name': 'always'})
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
+
+ def test_oom_score_adj_value(self):
+ service = self.create_service('web', oom_score_adj=500)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.OomScoreAdj'), 500)
+
+ def test_group_add_value(self):
+ service = self.create_service('web', group_add=["root", "1"])
+ container = create_and_start_container(service)
+
+ host_container_groupadd = container.get('HostConfig.GroupAdd')
+ assert "root" in host_container_groupadd
+ assert "1" in host_container_groupadd
+
+ def test_dns_opt_value(self):
+ service = self.create_service('web', dns_opt=["use-vc", "no-tld-query"])
+ container = create_and_start_container(service)
+
+ dns_opt = container.get('HostConfig.DnsOptions')
+ assert 'use-vc' in dns_opt
+ assert 'no-tld-query' in dns_opt
+
+ def test_restart_on_failure_value(self):
+ service = self.create_service('web', restart={
+ 'Name': 'on-failure',
+ 'MaximumRetryCount': 5
+ })
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
+ self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)
+
+ def test_cap_add_list(self):
+ service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.CapAdd'), ['SYS_ADMIN', 'NET_ADMIN'])
+
+ def test_cap_drop_list(self):
+ service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])
+
+ def test_dns_search(self):
+ service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])
+
+ @v2_only()
+ def test_tmpfs(self):
+ service = self.create_service('web', tmpfs=['/run'])
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.Tmpfs'), {'/run': ''})
+
+ def test_working_dir_param(self):
+ service = self.create_service('container', working_dir='/working/dir/sample')
+ container = service.create_container()
+ self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample')
+
+ def test_split_env(self):
+ service = self.create_service(
+ 'web',
+ environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
+ env = create_and_start_container(service).environment
+ for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
+ self.assertEqual(env[k], v)
+
+ def test_env_from_file_combined_with_env(self):
+ service = self.create_service(
+ 'web',
+ environment=['ONE=1', 'TWO=2', 'THREE=3'],
+ env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
+ env = create_and_start_container(service).environment
+ for k, v in {
+ 'ONE': '1',
+ 'TWO': '2',
+ 'THREE': '3',
+ 'FOO': 'baz',
+ 'DOO': 'dah'
+ }.items():
+ self.assertEqual(env[k], v)
+
+ @v3_only()
+ def test_build_with_cachefrom(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ service = self.create_service('cache_from',
+ build={'context': base_dir,
+ 'cache_from': ['build1']})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+
+ @mock.patch.dict(os.environ)
+ def test_resolve_env(self):
+ os.environ['FILE_DEF'] = 'E1'
+ os.environ['FILE_DEF_EMPTY'] = 'E2'
+ os.environ['ENV_DEF'] = 'E3'
+ service = self.create_service(
+ 'web',
+ environment={
+ 'FILE_DEF': 'F1',
+ 'FILE_DEF_EMPTY': '',
+ 'ENV_DEF': None,
+ 'NO_DEF': None
+ }
+ )
+ env = create_and_start_container(service).environment
+ for k, v in {
+ 'FILE_DEF': 'F1',
+ 'FILE_DEF_EMPTY': '',
+ 'ENV_DEF': 'E3',
+ 'NO_DEF': None
+ }.items():
+ self.assertEqual(env[k], v)
+
+ def test_with_high_enough_api_version_we_get_default_network_mode(self):
+ # TODO: remove this test once minimum docker version is 1.8.x
+ with mock.patch.object(self.client, '_version', '1.20'):
+ service = self.create_service('web')
+ service_config = service._get_container_host_config({})
+ self.assertEqual(service_config['NetworkMode'], 'default')
+
+ def test_labels(self):
+ labels_dict = {
+ 'com.example.description': "Accounting webapp",
+ 'com.example.department': "Finance",
+ 'com.example.label-with-empty-value': "",
+ }
+
+ compose_labels = {
+ LABEL_CONTAINER_NUMBER: '1',
+ LABEL_ONE_OFF: 'False',
+ LABEL_PROJECT: 'composetest',
+ LABEL_SERVICE: 'web',
+ LABEL_VERSION: __version__,
+ }
+ expected = dict(labels_dict, **compose_labels)
+
+ service = self.create_service('web', labels=labels_dict)
+ labels = create_and_start_container(service).labels.items()
+ for pair in expected.items():
+ self.assertIn(pair, labels)
+
+ def test_empty_labels(self):
+ labels_dict = {'foo': '', 'bar': ''}
+ service = self.create_service('web', labels=labels_dict)
+ labels = create_and_start_container(service).labels.items()
+ for name in labels_dict:
+ self.assertIn((name, ''), labels)
+
+ def test_stop_signal(self):
+ stop_signal = 'SIGINT'
+ service = self.create_service('web', stop_signal=stop_signal)
+ container = create_and_start_container(service)
+ self.assertEqual(container.stop_signal, stop_signal)
+
+ def test_custom_container_name(self):
+ service = self.create_service('web', container_name='my-web-container')
+ self.assertEqual(service.custom_container_name, 'my-web-container')
+
+ container = create_and_start_container(service)
+ self.assertEqual(container.name, 'my-web-container')
+
+ one_off_container = service.create_container(one_off=True)
+ self.assertNotEqual(one_off_container.name, 'my-web-container')
+
+ @pytest.mark.skipif(True, reason="Broken on 1.11.0 - 17.03.0")
+ def test_log_driver_invalid(self):
+ service = self.create_service('web', logging={'driver': 'xxx'})
+ expected_error_msg = "logger: no log driver named 'xxx' is registered"
+
+ with self.assertRaisesRegexp(APIError, expected_error_msg):
+ create_and_start_container(service)
+
+ def test_log_driver_empty_default_jsonfile(self):
+ service = self.create_service('web')
+ log_config = create_and_start_container(service).log_config
+
+ self.assertEqual('json-file', log_config['Type'])
+ self.assertFalse(log_config['Config'])
+
+ def test_log_driver_none(self):
+ service = self.create_service('web', logging={'driver': 'none'})
+ log_config = create_and_start_container(service).log_config
+
+ self.assertEqual('none', log_config['Type'])
+ self.assertFalse(log_config['Config'])
+
+ def test_devices(self):
+ service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
+ device_config = create_and_start_container(service).get('HostConfig.Devices')
+
+ device_dict = {
+ 'PathOnHost': '/dev/random',
+ 'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/mapped-random'
+ }
+
+ self.assertEqual(1, len(device_config))
+ self.assertDictEqual(device_dict, device_config[0])
+
+ def test_duplicate_containers(self):
+ service = self.create_service('web')
+
+ options = service._get_container_create_options({}, 1)
+ original = Container.create(service.client, **options)
+
+ self.assertEqual(set(service.containers(stopped=True)), set([original]))
+ self.assertEqual(set(service.duplicate_containers()), set())
+
+ options['name'] = 'temporary_container_name'
+ duplicate = Container.create(service.client, **options)
+
+ self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
+ self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
+
+
+def converge(service, strategy=ConvergenceStrategy.changed):
+ """Create a converge plan from a strategy and execute the plan."""
+ plan = service.convergence_plan(strategy)
+ return service.execute_convergence_plan(plan, timeout=1)
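+
+# Example usage (see ConfigHashTest below):
+#   container = converge(web)[0]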
+
+
+class ConfigHashTest(DockerClientTestCase):
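+ """Tests for LABEL_CONFIG_HASH, the label under which Compose stores a hash
+ of the service's configuration. Convergence uses it to decide whether an
+ existing container still matches its config, which is why it is omitted for
+ one-off containers and when options are overridden at create time."""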
+
+ def test_no_config_hash_when_one_off(self):
+ web = self.create_service('web')
+ container = web.create_container(one_off=True)
+ self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
+
+ def test_no_config_hash_when_overriding_options(self):
+ web = self.create_service('web')
+ container = web.create_container(environment={'FOO': '1'})
+ self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
+
+ def test_config_hash_with_custom_labels(self):
+ web = self.create_service('web', labels={'foo': '1'})
+ container = converge(web)[0]
+ self.assertIn(LABEL_CONFIG_HASH, container.labels)
+ self.assertIn('foo', container.labels)
+
+ def test_config_hash_sticks_around(self):
+ web = self.create_service('web', command=["top"])
+ container = converge(web)[0]
+ self.assertIn(LABEL_CONFIG_HASH, container.labels)
+
+ web = self.create_service('web', command=["top", "-d", "1"])
+ container = converge(web)[0]
+ self.assertIn(LABEL_CONFIG_HASH, container.labels)
diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py
new file mode 100644
index 00000000..047dc704
--- /dev/null
+++ b/tests/integration/state_test.py
@@ -0,0 +1,308 @@
+"""
+Integration tests which cover state convergence (aka smart recreate) performed
+by `docker-compose up`.
+"""
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import py
+from docker.errors import ImageNotFound
+
+from .testcases import DockerClientTestCase
+from .testcases import get_links
+from .testcases import no_cluster
+from compose.config import config
+from compose.project import Project
+from compose.service import ConvergenceStrategy
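+
+# A convergence plan pairs an action with the affected containers; the tests
+# below assert against tuples such as:
+#
+#   ('create', [])             # no container exists yet
+#   ('start', [stopped])       # container exists but is not running
+#   ('recreate', [container])  # configuration or image has changed
+#   ('noop', [container])      # nothing needs to change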
+
+
+class ProjectTestCase(DockerClientTestCase):
+ def run_up(self, cfg, **kwargs):
+ kwargs.setdefault('timeout', 1)
+ kwargs.setdefault('detached', True)
+
+ project = self.make_project(cfg)
+ project.up(**kwargs)
+ return set(project.containers(stopped=True))
+
+ def make_project(self, cfg):
+ details = config.ConfigDetails(
+ 'working_dir',
+ [config.ConfigFile(None, cfg)])
+ return Project.from_config(
+ name='composetest',
+ client=self.client,
+ config_data=config.load(details))
+
+
+class BasicProjectTest(ProjectTestCase):
+ def setUp(self):
+ super(BasicProjectTest, self).setUp()
+
+ self.cfg = {
+ 'db': {'image': 'busybox:latest', 'command': 'top'},
+ 'web': {'image': 'busybox:latest', 'command': 'top'},
+ }
+
+ def test_no_change(self):
+ old_containers = self.run_up(self.cfg)
+ self.assertEqual(len(old_containers), 2)
+
+ new_containers = self.run_up(self.cfg)
+ self.assertEqual(len(new_containers), 2)
+
+ self.assertEqual(old_containers, new_containers)
+
+ def test_partial_change(self):
+ old_containers = self.run_up(self.cfg)
+ old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
+ old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
+
+ self.cfg['web']['command'] = '/bin/true'
+
+ new_containers = self.run_up(self.cfg)
+ self.assertEqual(len(new_containers), 2)
+
+ preserved = list(old_containers & new_containers)
+ self.assertEqual(preserved, [old_db])
+
+ removed = list(old_containers - new_containers)
+ self.assertEqual(removed, [old_web])
+
+ created = list(new_containers - old_containers)
+ self.assertEqual(len(created), 1)
+ self.assertEqual(created[0].name_without_project, 'web_1')
+ self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])
+
+ def test_all_change(self):
+ old_containers = self.run_up(self.cfg)
+ self.assertEqual(len(old_containers), 2)
+
+ self.cfg['web']['command'] = '/bin/true'
+ self.cfg['db']['command'] = '/bin/true'
+
+ new_containers = self.run_up(self.cfg)
+ self.assertEqual(len(new_containers), 2)
+
+ unchanged = old_containers & new_containers
+ self.assertEqual(len(unchanged), 0)
+
+ new = new_containers - old_containers
+ self.assertEqual(len(new), 2)
+
+
+class ProjectWithDependenciesTest(ProjectTestCase):
+ def setUp(self):
+ super(ProjectWithDependenciesTest, self).setUp()
+
+ self.cfg = {
+ 'db': {
+ 'image': 'busybox:latest',
+ 'command': 'tail -f /dev/null',
+ },
+ 'web': {
+ 'image': 'busybox:latest',
+ 'command': 'tail -f /dev/null',
+ 'links': ['db'],
+ },
+ 'nginx': {
+ 'image': 'busybox:latest',
+ 'command': 'tail -f /dev/null',
+ 'links': ['web'],
+ },
+ }
+
+ def test_up(self):
+ containers = self.run_up(self.cfg)
+ self.assertEqual(
+ set(c.name_without_project for c in containers),
+ set(['db_1', 'web_1', 'nginx_1']),
+ )
+
+ def test_change_leaf(self):
+ old_containers = self.run_up(self.cfg)
+
+ self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(self.cfg)
+
+ self.assertEqual(
+ set(c.name_without_project for c in new_containers - old_containers),
+ set(['nginx_1']),
+ )
+
+ def test_change_middle(self):
+ old_containers = self.run_up(self.cfg)
+
+ self.cfg['web']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(self.cfg)
+
+ self.assertEqual(
+ set(c.name_without_project for c in new_containers - old_containers),
+ set(['web_1', 'nginx_1']),
+ )
+
+ def test_change_root(self):
+ old_containers = self.run_up(self.cfg)
+
+ self.cfg['db']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(self.cfg)
+
+ self.assertEqual(
+ set(c.name_without_project for c in new_containers - old_containers),
+ set(['db_1', 'web_1', 'nginx_1']),
+ )
+
+ def test_change_root_no_recreate(self):
+ old_containers = self.run_up(self.cfg)
+
+ self.cfg['db']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(
+ self.cfg,
+ strategy=ConvergenceStrategy.never)
+
+ self.assertEqual(new_containers - old_containers, set())
+
+ def test_service_removed_while_down(self):
+ next_cfg = {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'command': 'tail -f /dev/null',
+ },
+ 'nginx': self.cfg['nginx'],
+ }
+
+ containers = self.run_up(self.cfg)
+ self.assertEqual(len(containers), 3)
+
+ project = self.make_project(self.cfg)
+ project.stop(timeout=1)
+
+ containers = self.run_up(next_cfg)
+ self.assertEqual(len(containers), 2)
+
+ def test_service_recreated_when_dependency_created(self):
+ containers = self.run_up(self.cfg, service_names=['web'], start_deps=False)
+ self.assertEqual(len(containers), 1)
+
+ containers = self.run_up(self.cfg)
+ self.assertEqual(len(containers), 3)
+
+ web, = [c for c in containers if c.service == 'web']
+ nginx, = [c for c in containers if c.service == 'nginx']
+
+ self.assertEqual(set(get_links(web)), {'composetest_db_1', 'db', 'db_1'})
+ self.assertEqual(set(get_links(nginx)), {'composetest_web_1', 'web', 'web_1'})
+
+
+class ServiceStateTest(DockerClientTestCase):
+ """Test cases for Service.convergence_plan."""
+
+ def test_trigger_create(self):
+ web = self.create_service('web')
+ self.assertEqual(('create', []), web.convergence_plan())
+
+ def test_trigger_noop(self):
+ web = self.create_service('web')
+ container = web.create_container()
+ web.start()
+
+ web = self.create_service('web')
+ self.assertEqual(('noop', [container]), web.convergence_plan())
+
+ def test_trigger_start(self):
+ options = dict(command=["top"])
+
+ web = self.create_service('web', **options)
+ web.scale(2)
+
+ containers = web.containers(stopped=True)
+ containers[0].stop()
+ containers[0].inspect()
+
+ self.assertEqual([c.is_running for c in containers], [False, True])
+
+ self.assertEqual(
+ ('start', containers[0:1]),
+ web.convergence_plan(),
+ )
+
+ def test_trigger_recreate_with_config_change(self):
+ web = self.create_service('web', command=["top"])
+ container = web.create_container()
+
+ web = self.create_service('web', command=["top", "-d", "1"])
+ self.assertEqual(('recreate', [container]), web.convergence_plan())
+
+ def test_trigger_recreate_with_nonexistent_image_tag(self):
+ web = self.create_service('web', image="busybox:latest")
+ container = web.create_container()
+
+ web = self.create_service('web', image="nonexistent-image")
+ self.assertEqual(('recreate', [container]), web.convergence_plan())
+
+ def test_trigger_recreate_with_image_change(self):
+ repo = 'composetest_myimage'
+ tag = 'latest'
+ image = '{}:{}'.format(repo, tag)
+
+ def safe_remove_image(image):
+ try:
+ self.client.remove_image(image)
+ except ImageNotFound:
+ pass
+
+ image_id = self.client.images(name='busybox')[0]['Id']
+ self.client.tag(image_id, repository=repo, tag=tag)
+ self.addCleanup(safe_remove_image, image)
+
+ web = self.create_service('web', image=image)
+ container = web.create_container()
+
+ # update the image
+ c = self.client.create_container(image, ['touch', '/hello.txt'], host_config={})
+
+ # In the case of a cluster, there's a chance we pick up the old image when
+ # calculating the new hash. To circumvent that, untag the old image first
+ # See also: https://github.com/moby/moby/issues/26852
+ self.client.remove_image(image, force=True)
+
+ self.client.commit(c, repository=repo, tag=tag)
+ self.client.remove_container(c)
+
+ web = self.create_service('web', image=image)
+ self.assertEqual(('recreate', [container]), web.convergence_plan())
+
+ @no_cluster('Cannot guarantee the build will run on the same node the service is deployed to')
+ def test_trigger_recreate_with_build(self):
+ context = py.test.ensuretemp('test_trigger_recreate_with_build')
+ self.addCleanup(context.remove)
+
+ base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n"
+ dockerfile = context.join('Dockerfile')
+ dockerfile.write(base_image)
+
+ web = self.create_service('web', build={'context': str(context)})
+ container = web.create_container()
+
+ dockerfile.write(base_image + 'CMD echo hello world\n')
+ web.build()
+
+ web = self.create_service('web', build={'context': str(context)})
+ self.assertEqual(('recreate', [container]), web.convergence_plan())
+
+ def test_image_changed_to_build(self):
+ context = py.test.ensuretemp('test_image_changed_to_build')
+ self.addCleanup(context.remove)
+ context.join('Dockerfile').write("""
+ FROM busybox
+ LABEL com.docker.compose.test_image=true
+ """)
+
+ web = self.create_service('web', image='busybox')
+ container = web.create_container()
+
+ web = self.create_service('web', build={'context': str(context)})
+ plan = web.convergence_plan()
+ self.assertEqual(('recreate', [container]), plan)
+ containers = web.execute_convergence_plan(plan)
+ self.assertEqual(len(containers), 1)
diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py
new file mode 100644
index 00000000..b72fb53a
--- /dev/null
+++ b/tests/integration/testcases.py
@@ -0,0 +1,187 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import functools
+import os
+
+import pytest
+from docker.errors import APIError
+from docker.utils import version_lt
+
+from .. import unittest
+from compose.cli.docker_client import docker_client
+from compose.config.config import resolve_environment
+from compose.config.environment import Environment
+from compose.const import API_VERSIONS
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import LABEL_PROJECT
+from compose.progress_stream import stream_output
+from compose.service import Service
+
+SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0'
+SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0'
+SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0'
+SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
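+
+# These environment flags let CI skip or adjust tests around known defects in
+# Swarm Classic (e.g. the /containers/json bug referenced by
+# SWARM_SKIP_CONTAINERS_ALL) or force multi-node behaviour for is_cluster().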
+
+
+def pull_busybox(client):
+ client.pull('busybox:latest', stream=False)
+
+
+def get_links(container):
+ links = container.get('HostConfig.Links') or []
+
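+ # Each entry typically looks like '/composetest_db_1:/composetest_web_1/db_1'
+ # (an assumption about the HostConfig.Links format): the alias is the last
+ # path component of the part after the colon.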
+ def format_link(link):
+ _, alias = link.split(':')
+ return alias.split('/')[-1]
+
+ return [format_link(link) for link in links]
+
+
+def engine_max_version():
+ if 'DOCKER_VERSION' not in os.environ:
+ return V3_3
+ version = os.environ['DOCKER_VERSION'].partition('-')[0]
+ if version_lt(version, '1.10'):
+ return V1
+ if version_lt(version, '1.12'):
+ return V2_0
+ if version_lt(version, '1.13'):
+ return V2_1
+ if version_lt(version, '17.06'):
+ return V3_2
+ return V3_3
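+
+# For example (illustrative): DOCKER_VERSION=1.12.3 yields V2_1, since it is
+# not less than 1.12 but is less than 1.13; with DOCKER_VERSION unset the
+# newest supported schema (V3_3) is assumed.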
+
+
+def min_version_skip(version):
+ return pytest.mark.skipif(
+ engine_max_version() < version,
+ reason="Engine version %s is too low" % version
+ )
+
+
+def v2_only():
+ return min_version_skip(V2_0)
+
+
+def v2_1_only():
+ return min_version_skip(V2_1)
+
+
+def v2_2_only():
+ return min_version_skip(V2_2)
+
+
+def v2_3_only():
+ return min_version_skip(V2_3)
+
+
+def v3_only():
+ return min_version_skip(V3_0)
+
+
+class DockerClientTestCase(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ version = API_VERSIONS[engine_max_version()]
+ cls.client = docker_client(Environment(), version)
+
+ @classmethod
+ def tearDownClass(cls):
+ del cls.client
+
+ def tearDown(self):
+ for c in self.client.containers(
+ all=True,
+ filters={'label': '%s=composetest' % LABEL_PROJECT}):
+ self.client.remove_container(c['Id'], force=True)
+
+ for i in self.client.images(
+ filters={'label': 'com.docker.compose.test_image'}):
+ try:
+ self.client.remove_image(i, force=True)
+ except APIError as e:
+ if e.is_server_error():
+ pass
+
+ volumes = self.client.volumes().get('Volumes') or []
+ for v in volumes:
+ if 'composetest_' in v['Name']:
+ self.client.remove_volume(v['Name'])
+
+ networks = self.client.networks()
+ for n in networks:
+ if 'composetest_' in n['Name']:
+ self.client.remove_network(n['Name'])
+
+ def create_service(self, name, **kwargs):
+ if 'image' not in kwargs and 'build' not in kwargs:
+ kwargs['image'] = 'busybox:latest'
+
+ if 'command' not in kwargs:
+ kwargs['command'] = ["top"]
+
+ kwargs['environment'] = resolve_environment(
+ kwargs, Environment.from_env_file(None)
+ )
+ labels = dict(kwargs.setdefault('labels', {}))
+ labels['com.docker.compose.test-name'] = self.id()
+
+ return Service(name, client=self.client, project='composetest', **kwargs)
+
+ def check_build(self, *args, **kwargs):
+ kwargs.setdefault('rm', True)
+ build_output = self.client.build(*args, **kwargs)
+ stream_output(build_output, open('/dev/null', 'w'))
+
+ def require_api_version(self, minimum):
+ api_version = self.client.version()['ApiVersion']
+ if version_lt(api_version, minimum):
+ pytest.skip("API version is too low ({} < {})".format(api_version, minimum))
+
+ def get_volume_data(self, volume_name):
+ if not is_cluster(self.client):
+ return self.client.inspect_volume(volume_name)
+
+ volumes = self.client.volumes(filters={'name': volume_name})['Volumes']
+ assert len(volumes) > 0
+ return self.client.inspect_volume(volumes[0]['Name'])
+
+
+def is_cluster(client):
+ if SWARM_ASSUME_MULTINODE:
+ return True
+
+ def get_nodes_number():
+ try:
+ return len(client.nodes())
+ except APIError:
+ # If the Engine is not part of a Swarm, the SDK will raise
+ # an APIError
+ return 0
+
+ if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
+ # Only make the API call if the value hasn't been cached yet
+ is_cluster.nodes = get_nodes_number()
+
+ return is_cluster.nodes > 1
+
+
+def no_cluster(reason):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if is_cluster(self.client):
+ pytest.skip("Test will not be run in cluster mode: %s" % reason)
+ return
+ return f(self, *args, **kwargs)
+ return wrapper
+
+ return decorator
diff --git a/tests/integration/volume_test.py b/tests/integration/volume_test.py
new file mode 100644
index 00000000..2a521d4c
--- /dev/null
+++ b/tests/integration/volume_test.py
@@ -0,0 +1,126 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import six
+from docker.errors import DockerException
+
+from .testcases import DockerClientTestCase
+from .testcases import no_cluster
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_VOLUME
+from compose.volume import Volume
+
+
+class VolumeTest(DockerClientTestCase):
+ def setUp(self):
+ super(VolumeTest, self).setUp()
+ self.tmp_volumes = []
+
+ def tearDown(self):
+ for volume in self.tmp_volumes:
+ try:
+ self.client.remove_volume(volume.full_name)
+ except DockerException:
+ pass
+ del self.tmp_volumes
+ super(VolumeTest, self).tearDown()
+
+ def create_volume(self, name, driver=None, opts=None, external=None, custom_name=False):
+ if external:
+ custom_name = True
+ if isinstance(external, six.text_type):
+ name = external
+
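+ # Note: Volume.full_name is normally '<project>_<name>' (here prefixed with
+ # 'composetest_'); with custom_name or external the given name is used
+ # verbatim, which the external/custom-name tests below rely on.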
+ vol = Volume(
+ self.client, 'composetest', name, driver=driver, driver_opts=opts,
+ external=bool(external), custom_name=custom_name
+ )
+ self.tmp_volumes.append(vol)
+ return vol
+
+ def test_create_volume(self):
+ vol = self.create_volume('volume01')
+ vol.create()
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.full_name
+
+ def test_create_volume_custom_name(self):
+ vol = self.create_volume('volume01', custom_name=True)
+ assert vol.name == vol.full_name
+ vol.create()
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.name
+
+ def test_recreate_existing_volume(self):
+ vol = self.create_volume('volume01')
+
+ vol.create()
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.full_name
+
+ vol.create()
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.full_name
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_inspect_volume(self):
+ vol = self.create_volume('volume01')
+ vol.create()
+ info = vol.inspect()
+ assert info['Name'] == vol.full_name
+
+ @no_cluster('remove volume by name defect on Swarm Classic')
+ def test_remove_volume(self):
+ vol = Volume(self.client, 'composetest', 'volume01')
+ vol.create()
+ vol.remove()
+ volumes = self.client.volumes()['Volumes']
+ assert len([v for v in volumes if v['Name'] == vol.full_name]) == 0
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_external_volume(self):
+ vol = self.create_volume('composetest_volume_ext', external=True)
+ assert vol.external is True
+ assert vol.full_name == vol.name
+ vol.create()
+ info = vol.inspect()
+ assert info['Name'] == vol.name
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_external_aliased_volume(self):
+ alias_name = 'composetest_alias01'
+ vol = self.create_volume('volume01', external=alias_name)
+ assert vol.external is True
+ assert vol.full_name == alias_name
+ vol.create()
+ info = vol.inspect()
+ assert info['Name'] == alias_name
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_exists(self):
+ vol = self.create_volume('volume01')
+ assert vol.exists() is False
+ vol.create()
+ assert vol.exists() is True
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_exists_external(self):
+ vol = self.create_volume('volume01', external=True)
+ assert vol.exists() is False
+ vol.create()
+ assert vol.exists() is True
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_exists_external_aliased(self):
+ vol = self.create_volume('volume01', external='composetest_alias01')
+ assert vol.exists() is False
+ vol.create()
+ assert vol.exists() is True
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_volume_default_labels(self):
+ vol = self.create_volume('volume01')
+ vol.create()
+ vol_data = vol.inspect()
+ labels = vol_data['Labels']
+ assert labels[LABEL_VOLUME] == vol.name
+ assert labels[LABEL_PROJECT] == vol.project
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/__init__.py
diff --git a/tests/unit/bundle_test.py b/tests/unit/bundle_test.py
new file mode 100644
index 00000000..84779520
--- /dev/null
+++ b/tests/unit/bundle_test.py
@@ -0,0 +1,222 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+import mock
+import pytest
+
+from compose import bundle
+from compose import service
+from compose.cli.errors import UserError
+from compose.config.config import Config
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
+@pytest.fixture
+def mock_service():
+ return mock.create_autospec(
+ service.Service,
+ client=mock.create_autospec(docker.APIClient),
+ options={})
+
+
+def test_get_image_digest_exists(mock_service):
+ mock_service.options['image'] = 'abcd'
+ mock_service.image.return_value = {'RepoDigests': ['digest1']}
+ digest = bundle.get_image_digest(mock_service)
+ assert digest == 'digest1'
+
+
+def test_get_image_digest_image_uses_digest(mock_service):
+ mock_service.options['image'] = image_id = 'redis@sha256:digest'
+
+ digest = bundle.get_image_digest(mock_service)
+ assert digest == image_id
+ assert not mock_service.image.called
+
+
+def test_get_image_digest_no_image(mock_service):
+ with pytest.raises(UserError) as exc:
+ bundle.get_image_digest(service.Service(name='theservice'))
+
+ assert "doesn't define an image tag" in exc.exconly()
+
+
+def test_push_image_with_saved_digest(mock_service):
+ mock_service.options['build'] = '.'
+ mock_service.options['image'] = image_id = 'abcd'
+ mock_service.push.return_value = expected = 'sha256:thedigest'
+ mock_service.image.return_value = {'RepoDigests': ['digest1']}
+
+ digest = bundle.push_image(mock_service)
+ assert digest == image_id + '@' + expected
+
+ mock_service.push.assert_called_once_with()
+ assert not mock_service.client.push.called
+
+
+def test_push_image(mock_service):
+ mock_service.options['build'] = '.'
+ mock_service.options['image'] = image_id = 'abcd'
+ mock_service.push.return_value = expected = 'sha256:thedigest'
+ mock_service.image.return_value = {'RepoDigests': []}
+
+ digest = bundle.push_image(mock_service)
+ assert digest == image_id + '@' + expected
+
+ mock_service.push.assert_called_once_with()
+ mock_service.client.pull.assert_called_once_with(digest)
+
+
+def test_to_bundle():
+ image_digests = {'a': 'aaaa', 'b': 'bbbb'}
+ services = [
+ {'name': 'a', 'build': '.', },
+ {'name': 'b', 'build': './b'},
+ ]
+ config = Config(
+ version=V2_0,
+ services=services,
+ volumes={'special': {}},
+ networks={'extra': {}},
+ secrets={},
+ configs={}
+ )
+
+ with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ output = bundle.to_bundle(config, image_digests)
+
+ assert mock_log.mock_calls == [
+ mock.call("Unsupported top level key 'networks' - ignoring"),
+ mock.call("Unsupported top level key 'volumes' - ignoring"),
+ ]
+
+ assert output == {
+ 'Version': '0.1',
+ 'Services': {
+ 'a': {'Image': 'aaaa', 'Networks': ['default']},
+ 'b': {'Image': 'bbbb', 'Networks': ['default']},
+ }
+ }
+
+
+def test_convert_service_to_bundle():
+ name = 'theservice'
+ image_digest = 'thedigest'
+ service_dict = {
+ 'ports': ['80'],
+ 'expose': ['1234'],
+ 'networks': {'extra': {}},
+ 'command': 'foo',
+ 'entrypoint': 'entry',
+ 'environment': {'BAZ': 'ENV'},
+ 'build': '.',
+ 'working_dir': '/tmp',
+ 'user': 'root',
+ 'labels': {'FOO': 'LABEL'},
+ 'privileged': True,
+ }
+
+ with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ config = bundle.convert_service_to_bundle(name, service_dict, image_digest)
+
+ mock_log.assert_called_once_with(
+ "Unsupported key 'privileged' in services.theservice - ignoring")
+
+ assert config == {
+ 'Image': image_digest,
+ 'Ports': [
+ {'Protocol': 'tcp', 'Port': 80},
+ {'Protocol': 'tcp', 'Port': 1234},
+ ],
+ 'Networks': ['extra'],
+ 'Command': ['entry', 'foo'],
+ 'Env': ['BAZ=ENV'],
+ 'WorkingDir': '/tmp',
+ 'User': 'root',
+ 'Labels': {'FOO': 'LABEL'},
+ }
+
+
+def test_set_command_and_args_none():
+ config = {}
+ bundle.set_command_and_args(config, [], [])
+ assert config == {}
+
+
+def test_set_command_and_args_from_command():
+ config = {}
+ bundle.set_command_and_args(config, [], "echo ok")
+ assert config == {'Args': ['echo', 'ok']}
+
+
+def test_set_command_and_args_from_entrypoint():
+ config = {}
+ bundle.set_command_and_args(config, "echo entry", [])
+ assert config == {'Command': ['echo', 'entry']}
+
+
+def test_set_command_and_args_from_both():
+ config = {}
+ bundle.set_command_and_args(config, "echo entry", ["extra", "arg"])
+ assert config == {'Command': ['echo', 'entry', "extra", "arg"]}
+
+
+def test_make_service_networks_default():
+ name = 'theservice'
+ service_dict = {}
+
+ with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ networks = bundle.make_service_networks(name, service_dict)
+
+ assert not mock_log.called
+ assert networks == ['default']
+
+
+def test_make_service_networks():
+ name = 'theservice'
+ service_dict = {
+ 'networks': {
+ 'foo': {
+ 'aliases': ['one', 'two'],
+ },
+ 'bar': {}
+ },
+ }
+
+ with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ networks = bundle.make_service_networks(name, service_dict)
+
+ mock_log.assert_called_once_with(
+ "Unsupported key 'aliases' in services.theservice.networks.foo - ignoring")
+ assert sorted(networks) == sorted(service_dict['networks'])
+
+
+def test_make_port_specs():
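+ # Host-side bindings are dropped; only container ports and protocols remain.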
+ service_dict = {
+ 'expose': ['80', '500/udp'],
+ 'ports': [
+ '400:80',
+ '222',
+ '127.0.0.1:8001:8001',
+ '127.0.0.1:5000-5001:3000-3001'],
+ }
+ port_specs = bundle.make_port_specs(service_dict)
+ assert port_specs == [
+ {'Protocol': 'tcp', 'Port': 80},
+ {'Protocol': 'tcp', 'Port': 222},
+ {'Protocol': 'tcp', 'Port': 8001},
+ {'Protocol': 'tcp', 'Port': 3000},
+ {'Protocol': 'tcp', 'Port': 3001},
+ {'Protocol': 'udp', 'Port': 500},
+ ]
+
+
+def test_make_port_spec_with_protocol():
+ port_spec = bundle.make_port_spec("5000/udp")
+ assert port_spec == {'Protocol': 'udp', 'Port': 5000}
+
+
+def test_make_port_spec_default_protocol():
+ port_spec = bundle.make_port_spec("50000")
+ assert port_spec == {'Protocol': 'tcp', 'Port': 50000}
diff --git a/tests/unit/cli/__init__.py b/tests/unit/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/cli/__init__.py
diff --git a/tests/unit/cli/command_test.py b/tests/unit/cli/command_test.py
new file mode 100644
index 00000000..3a9844c4
--- /dev/null
+++ b/tests/unit/cli/command_test.py
@@ -0,0 +1,76 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+
+import pytest
+import six
+
+from compose.cli.command import get_config_path_from_options
+from compose.config.environment import Environment
+from compose.const import IS_WINDOWS_PLATFORM
+from tests import mock
+
+
+class TestGetConfigPathFromOptions(object):
+
+ def test_path_from_options(self):
+ paths = ['one.yml', 'two.yml']
+ opts = {'--file': paths}
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options('.', opts, environment) == paths
+
+ def test_single_path_from_env(self):
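+ # mock.patch.dict snapshots os.environ and restores it on exit.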
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_FILE'] = 'one.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options('.', {}, environment) == ['one.yml']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix separator')
+ def test_multiple_path_from_env(self):
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_FILE'] = 'one.yml:two.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', {}, environment
+ ) == ['one.yml', 'two.yml']
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator')
+ def test_multiple_path_from_env_windows(self):
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_FILE'] = 'one.yml;two.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', {}, environment
+ ) == ['one.yml', 'two.yml']
+
+ def test_multiple_path_from_env_custom_separator(self):
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
+ os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', {}, environment
+ ) == ['c:\\one.yml', '.\\semi;colon.yml']
+
+ def test_no_path(self):
+ environment = Environment.from_env_file('.')
+ assert not get_config_path_from_options('.', {}, environment)
+
+ def test_unicode_path_from_options(self):
+ paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
+ opts = {'--file': paths}
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', opts, environment
+ ) == ['就吃饭/docker-compose.yml']
+
+ @pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
+ def test_unicode_path_from_env(self):
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', {}, environment
+ ) == ['就吃饭/docker-compose.yml']
diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py
new file mode 100644
index 00000000..482ad985
--- /dev/null
+++ b/tests/unit/cli/docker_client_test.py
@@ -0,0 +1,187 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import platform
+import ssl
+
+import docker
+import pytest
+
+import compose
+from compose.cli import errors
+from compose.cli.docker_client import docker_client
+from compose.cli.docker_client import get_tls_version
+from compose.cli.docker_client import tls_config_from_options
+from tests import mock
+from tests import unittest
+
+
+class DockerClientTestCase(unittest.TestCase):
+
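+ # Building a client must not require HOME to be set in the environment.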
+ def test_docker_client_no_home(self):
+ with mock.patch.dict(os.environ):
+ del os.environ['HOME']
+ docker_client(os.environ)
+
+ @mock.patch.dict(os.environ)
+ def test_docker_client_with_custom_timeout(self):
+ os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
+ client = docker_client(os.environ)
+ assert client.timeout == 123
+
+ @mock.patch.dict(os.environ)
+ def test_custom_timeout_error(self):
+ os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
+ client = docker_client(os.environ)
+
+ with mock.patch('compose.cli.errors.log') as fake_log:
+ with pytest.raises(errors.ConnectionError):
+ with errors.handle_connection_errors(client):
+ raise errors.RequestsConnectionError(
+ errors.ReadTimeoutError(None, None, None))
+
+ assert fake_log.error.call_count == 1
+ assert '123' in fake_log.error.call_args[0][0]
+
+ with mock.patch('compose.cli.errors.log') as fake_log:
+ with pytest.raises(errors.ConnectionError):
+ with errors.handle_connection_errors(client):
+ raise errors.ReadTimeout()
+
+ assert fake_log.error.call_count == 1
+ assert '123' in fake_log.error.call_args[0][0]
+
+ def test_user_agent(self):
+ client = docker_client(os.environ)
+ expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format(
+ compose.__version__,
+ docker.__version__,
+ platform.system(),
+ platform.release()
+ )
+ self.assertEqual(client.headers['User-Agent'], expected)
+
+
+class TLSConfigTestCase(unittest.TestCase):
+ ca_cert = 'tests/fixtures/tls/ca.pem'
+ client_cert = 'tests/fixtures/tls/cert.pem'
+ key = 'tests/fixtures/tls/key.key'
+
+ def test_simple_tls(self):
+ options = {'--tls': True}
+ result = tls_config_from_options(options)
+ assert result is True
+
+ def test_tls_ca_cert(self):
+ options = {
+ '--tlscacert': self.ca_cert, '--tlsverify': True
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.ca_cert == options['--tlscacert']
+ assert result.verify is True
+
+ def test_tls_ca_cert_explicit(self):
+ options = {
+ '--tlscacert': self.ca_cert, '--tls': True,
+ '--tlsverify': True
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.ca_cert == options['--tlscacert']
+ assert result.verify is True
+
+ def test_tls_client_cert(self):
+ options = {
+ '--tlscert': self.client_cert, '--tlskey': self.key
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (options['--tlscert'], options['--tlskey'])
+
+ def test_tls_client_cert_explicit(self):
+ options = {
+ '--tlscert': self.client_cert, '--tlskey': self.key,
+ '--tls': True
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (options['--tlscert'], options['--tlskey'])
+
+ def test_tls_client_and_ca(self):
+ options = {
+ '--tlscert': self.client_cert, '--tlskey': self.key,
+ '--tlsverify': True, '--tlscacert': self.ca_cert
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (options['--tlscert'], options['--tlskey'])
+ assert result.ca_cert == options['--tlscacert']
+ assert result.verify is True
+
+ def test_tls_client_and_ca_explicit(self):
+ options = {
+ '--tlscert': self.client_cert, '--tlskey': self.key,
+ '--tlsverify': True, '--tlscacert': self.ca_cert,
+ '--tls': True
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (options['--tlscert'], options['--tlskey'])
+ assert result.ca_cert == options['--tlscacert']
+ assert result.verify is True
+
+ def test_tls_client_missing_key(self):
+ options = {'--tlscert': self.client_cert}
+ with pytest.raises(docker.errors.TLSParameterError):
+ tls_config_from_options(options)
+
+ options = {'--tlskey': self.key}
+ with pytest.raises(docker.errors.TLSParameterError):
+ tls_config_from_options(options)
+
+ def test_assert_hostname_explicit_skip(self):
+ options = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True}
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.assert_hostname is False
+
+ def test_tls_client_and_ca_quoted_paths(self):
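+ # Quotes wrapped around the certificate paths should be stripped.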
+ options = {
+ '--tlscacert': '"{0}"'.format(self.ca_cert),
+ '--tlscert': '"{0}"'.format(self.client_cert),
+ '--tlskey': '"{0}"'.format(self.key),
+ '--tlsverify': True
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (self.client_cert, self.key)
+ assert result.ca_cert == self.ca_cert
+ assert result.verify is True
+
+ def test_tls_simple_with_tls_version(self):
+ tls_version = 'TLSv1'
+ options = {'--tls': True}
+ environment = {'COMPOSE_TLS_VERSION': tls_version}
+ result = tls_config_from_options(options, environment)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.ssl_version == ssl.PROTOCOL_TLSv1
+
+
+class TestGetTlsVersion(object):
+ def test_get_tls_version_default(self):
+ environment = {}
+ assert get_tls_version(environment) is None
+
+ @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
+ def test_get_tls_version_upgrade(self):
+ environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
+ assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2
+
+ def test_get_tls_version_unavailable(self):
+ environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
+ with mock.patch('compose.cli.docker_client.log') as mock_log:
+ tls_version = get_tls_version(environment)
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ assert tls_version is None
diff --git a/tests/unit/cli/errors_test.py b/tests/unit/cli/errors_test.py
new file mode 100644
index 00000000..68326d1c
--- /dev/null
+++ b/tests/unit/cli/errors_test.py
@@ -0,0 +1,88 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+from docker.errors import APIError
+from requests.exceptions import ConnectionError
+
+from compose.cli import errors
+from compose.cli.errors import handle_connection_errors
+from compose.const import IS_WINDOWS_PLATFORM
+from tests import mock
+
+
+@pytest.yield_fixture
+def mock_logging():
+ with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
+ yield mock_log
+
+
+def patch_find_executable(side_effect):
+ return mock.patch(
+ 'compose.cli.errors.find_executable',
+ autospec=True,
+ side_effect=side_effect)
+
+
+class TestHandleConnectionErrors(object):
+
+ def test_generic_connection_error(self, mock_logging):
+ with pytest.raises(errors.ConnectionError):
+ with patch_find_executable(['/bin/docker', None]):
+ with handle_connection_errors(mock.Mock()):
+ raise ConnectionError()
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "Couldn't connect to Docker daemon" in args[0]
+
+ def test_api_error_version_mismatch(self, mock_logging):
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise APIError(None, None, b"client is newer than server")
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "Docker Engine of version 1.10.0 or greater" in args[0]
+
+ def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise APIError(None, None, u"client is newer than server")
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "Docker Engine of version 1.10.0 or greater" in args[0]
+
+ def test_api_error_version_other(self, mock_logging):
+ msg = b"Something broke!"
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise APIError(None, None, msg)
+
+ mock_logging.error.assert_called_once_with(msg.decode('utf-8'))
+
+ def test_api_error_version_other_unicode_explanation(self, mock_logging):
+ msg = u"Something broke!"
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise APIError(None, None, msg)
+
+ mock_logging.error.assert_called_once_with(msg)
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
+ def test_windows_pipe_error_no_data(self, mock_logging):
+ import pywintypes
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "The current Compose file version is not compatible with your engine version." in args[0]
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
+ def test_windows_pipe_error_misc(self, mock_logging):
+ import pywintypes
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
diff --git a/tests/unit/cli/formatter_test.py b/tests/unit/cli/formatter_test.py
new file mode 100644
index 00000000..4aa025e6
--- /dev/null
+++ b/tests/unit/cli/formatter_test.py
@@ -0,0 +1,53 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+from compose.cli import colors
+from compose.cli.formatter import ConsoleWarningFormatter
+from tests import unittest
+
+
+MESSAGE = 'this is the message'
+
+
+def make_log_record(level, message=None):
+ return logging.LogRecord('name', level, 'pathname', 0, message or MESSAGE, (), None)
+
+
+class ConsoleWarningFormatterTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.formatter = ConsoleWarningFormatter()
+
+ def test_format_warn(self):
+ output = self.formatter.format(make_log_record(logging.WARN))
+ expected = colors.yellow('WARNING') + ': '
+ assert output == expected + MESSAGE
+
+ def test_format_error(self):
+ output = self.formatter.format(make_log_record(logging.ERROR))
+ expected = colors.red('ERROR') + ': '
+ assert output == expected + MESSAGE
+
+ def test_format_info(self):
+ output = self.formatter.format(make_log_record(logging.INFO))
+ assert output == MESSAGE
+
+ def test_format_unicode_info(self):
+ message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
+ output = self.formatter.format(make_log_record(logging.INFO, message))
+ assert output == message.decode('utf-8')
+
+ def test_format_unicode_warn(self):
+ message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
+ output = self.formatter.format(make_log_record(logging.WARN, message))
+ expected = colors.yellow('WARNING') + ': '
+ assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
+
+ def test_format_unicode_error(self):
+ message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
+ output = self.formatter.format(make_log_record(logging.ERROR, message))
+ expected = colors.red('ERROR') + ': '
+ assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py
new file mode 100644
index 00000000..d0c4b56b
--- /dev/null
+++ b/tests/unit/cli/log_printer_test.py
@@ -0,0 +1,201 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import itertools
+
+import pytest
+import requests
+import six
+from docker.errors import APIError
+from six.moves.queue import Queue
+
+from compose.cli.log_printer import build_log_generator
+from compose.cli.log_printer import build_log_presenters
+from compose.cli.log_printer import build_no_log_generator
+from compose.cli.log_printer import consume_queue
+from compose.cli.log_printer import QueueItem
+from compose.cli.log_printer import wait_on_exit
+from compose.cli.log_printer import watch_events
+from compose.container import Container
+from tests import mock
+
+
+@pytest.fixture
+def output_stream():
+ output = six.StringIO()
+ output.flush = mock.Mock()
+ return output
+
+
+@pytest.fixture
+def mock_container():
+ return mock.Mock(spec=Container, name_without_project='web_1')
+
+
+class TestLogPresenter(object):
+
+ def test_monochrome(self, mock_container):
+ presenters = build_log_presenters(['foo', 'bar'], True)
+ presenter = next(presenters)
+ actual = presenter.present(mock_container, "this line")
+ assert actual == "web_1 | this line"
+
+ def test_polychrome(self, mock_container):
+ presenters = build_log_presenters(['foo', 'bar'], False)
+ presenter = next(presenters)
+ actual = presenter.present(mock_container, "this line")
+ assert '\033[' in actual
+
+
+def test_wait_on_exit():
+ exit_status = 3
+ mock_container = mock.Mock(
+ spec=Container,
+ name='cname',
+ wait=mock.Mock(return_value=exit_status))
+
+ expected = '{} exited with code {}\n'.format(mock_container.name, exit_status)
+ assert expected == wait_on_exit(mock_container)
+
+
+def test_wait_on_exit_raises():
+ status_code = 500
+
+ def mock_wait():
+ resp = requests.Response()
+ resp.status_code = status_code
+ raise APIError('Bad server', resp)
+
+ mock_container = mock.Mock(
+ spec=Container,
+ name='cname',
+ wait=mock_wait
+ )
+
+ expected = 'Unexpected API error for {} (HTTP code {})\n'.format(
+ mock_container.name, status_code,
+ )
+ assert expected in wait_on_exit(mock_container)
+
+
+def test_build_no_log_generator(mock_container):
+ mock_container.has_api_logs = False
+ mock_container.log_driver = 'none'
+ output, = build_no_log_generator(mock_container, None)
+ assert "WARNING: no logs are available with the 'none' log driver\n" in output
+ assert "exited with code" not in output
+
+
+class TestBuildLogGenerator(object):
+
+ def test_no_log_stream(self, mock_container):
+ mock_container.log_stream = None
+ mock_container.logs.return_value = iter([b"hello\nworld"])
+ log_args = {'follow': True}
+
+ generator = build_log_generator(mock_container, log_args)
+ assert next(generator) == "hello\n"
+ assert next(generator) == "world"
+ mock_container.logs.assert_called_once_with(
+ stdout=True,
+ stderr=True,
+ stream=True,
+ **log_args)
+
+ def test_with_log_stream(self, mock_container):
+ mock_container.log_stream = iter([b"hello\nworld"])
+ log_args = {'follow': True}
+
+ generator = build_log_generator(mock_container, log_args)
+ assert next(generator) == "hello\n"
+ assert next(generator) == "world"
+
+ def test_unicode(self, mock_container):
+ glyph = u'\u2022\n'
+ mock_container.log_stream = iter([glyph.encode('utf-8')])
+
+ generator = build_log_generator(mock_container, {})
+ assert next(generator) == glyph
+
+
+@pytest.fixture
+def thread_map():
+ return {'cid': mock.Mock()}
+
+
+@pytest.fixture
+def mock_presenters():
+ return itertools.cycle([mock.Mock()])
+
+
+class TestWatchEvents(object):
+
+ def test_stop_event(self, thread_map, mock_presenters):
+ event_stream = [{'action': 'stop', 'id': 'cid'}]
+ watch_events(thread_map, event_stream, mock_presenters, ())
+ assert not thread_map
+
+ def test_start_event(self, thread_map, mock_presenters):
+ container_id = 'abcd'
+ event = {'action': 'start', 'id': container_id, 'container': mock.Mock()}
+ event_stream = [event]
+ thread_args = 'foo', 'bar'
+
+ with mock.patch(
+ 'compose.cli.log_printer.build_thread',
+ autospec=True
+ ) as mock_build_thread:
+ watch_events(thread_map, event_stream, mock_presenters, thread_args)
+ mock_build_thread.assert_called_once_with(
+ event['container'],
+ next(mock_presenters),
+ *thread_args)
+ assert container_id in thread_map
+
+ def test_other_event(self, thread_map, mock_presenters):
+ container_id = 'abcd'
+ event_stream = [{'action': 'create', 'id': container_id}]
+ watch_events(thread_map, event_stream, mock_presenters, ())
+ assert container_id not in thread_map
+
+
+class TestConsumeQueue(object):
+
+ def test_item_is_an_exception(self):
+
+ class Problem(Exception):
+ pass
+
+ queue = Queue()
+ error = Problem('oops')
+ for item in QueueItem.new('a'), QueueItem.new('b'), QueueItem.exception(error):
+ queue.put(item)
+
+ generator = consume_queue(queue, False)
+ assert next(generator) == 'a'
+ assert next(generator) == 'b'
+ with pytest.raises(Problem):
+ next(generator)
+
+ def test_item_is_stop_without_cascade_stop(self):
+ queue = Queue()
+ for item in QueueItem.stop(), QueueItem.new('a'), QueueItem.new('b'):
+ queue.put(item)
+
+ generator = consume_queue(queue, False)
+ assert next(generator) == 'a'
+ assert next(generator) == 'b'
+
+ def test_item_is_stop_with_cascade_stop(self):
+ """Return the name of the container that caused the cascade_stop"""
+ queue = Queue()
+ for item in QueueItem.stop('foobar-1'), QueueItem.new('a'), QueueItem.new('b'):
+ queue.put(item)
+
+ generator = consume_queue(queue, True)
+ assert next(generator) == 'foobar-1'
+
+ def test_item_is_none_when_timeout_is_hit(self):
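+ # An empty queue times out and the generator yields None instead of blocking.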
+ queue = Queue()
+ generator = consume_queue(queue, False)
+ assert next(generator) is None
diff --git a/tests/unit/cli/main_test.py b/tests/unit/cli/main_test.py
new file mode 100644
index 00000000..dc527880
--- /dev/null
+++ b/tests/unit/cli/main_test.py
@@ -0,0 +1,104 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+
+import pytest
+
+from compose import container
+from compose.cli.errors import UserError
+from compose.cli.formatter import ConsoleWarningFormatter
+from compose.cli.main import convergence_strategy_from_opts
+from compose.cli.main import filter_containers_to_service_names
+from compose.cli.main import setup_console_handler
+from compose.service import ConvergenceStrategy
+from tests import mock
+
+
+def mock_container(service, number):
+ return mock.create_autospec(
+ container.Container,
+ service=service,
+ number=number,
+ name_without_project='{0}_{1}'.format(service, number))
+
+
+@pytest.fixture
+def logging_handler():
+ stream = mock.Mock()
+ stream.isatty.return_value = True
+ return logging.StreamHandler(stream=stream)
+
+
+class TestCLIMainTestCase(object):
+
+ def test_filter_containers_to_service_names(self):
+ containers = [
+ mock_container('web', 1),
+ mock_container('web', 2),
+ mock_container('db', 1),
+ mock_container('other', 1),
+ mock_container('another', 1),
+ ]
+ service_names = ['web', 'db']
+ actual = filter_containers_to_service_names(containers, service_names)
+ assert actual == containers[:3]
+
+ def test_filter_containers_to_service_names_all(self):
+ containers = [
+ mock_container('web', 1),
+ mock_container('db', 1),
+ mock_container('other', 1),
+ ]
+ service_names = []
+ actual = filter_containers_to_service_names(containers, service_names)
+ assert actual == containers
+
+
+class TestSetupConsoleHandlerTestCase(object):
+
+ def test_with_tty_verbose(self, logging_handler):
+ setup_console_handler(logging_handler, True)
+ assert type(logging_handler.formatter) == ConsoleWarningFormatter
+ assert '%(name)s' in logging_handler.formatter._fmt
+ assert '%(funcName)s' in logging_handler.formatter._fmt
+
+ def test_with_tty_not_verbose(self, logging_handler):
+ setup_console_handler(logging_handler, False)
+ assert type(logging_handler.formatter) == ConsoleWarningFormatter
+ assert '%(name)s' not in logging_handler.formatter._fmt
+ assert '%(funcName)s' not in logging_handler.formatter._fmt
+
+ def test_with_not_a_tty(self, logging_handler):
+ logging_handler.stream.isatty.return_value = False
+ setup_console_handler(logging_handler, False)
+ assert type(logging_handler.formatter) == logging.Formatter
+
+
+class TestConvergeStrategyFromOptsTestCase(object):
+
+ def test_invalid_opts(self):
+ options = {'--force-recreate': True, '--no-recreate': True}
+ with pytest.raises(UserError):
+ convergence_strategy_from_opts(options)
+
+ def test_always(self):
+ options = {'--force-recreate': True, '--no-recreate': False}
+ assert (
+ convergence_strategy_from_opts(options) ==
+ ConvergenceStrategy.always
+ )
+
+ def test_never(self):
+ options = {'--force-recreate': False, '--no-recreate': True}
+ assert (
+ convergence_strategy_from_opts(options) ==
+ ConvergenceStrategy.never
+ )
+
+ def test_changed(self):
+ options = {'--force-recreate': False, '--no-recreate': False}
+ assert (
+ convergence_strategy_from_opts(options) ==
+ ConvergenceStrategy.changed
+ )
diff --git a/tests/unit/cli/utils_test.py b/tests/unit/cli/utils_test.py
new file mode 100644
index 00000000..066fb359
--- /dev/null
+++ b/tests/unit/cli/utils_test.py
@@ -0,0 +1,23 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import unittest
+
+from compose.cli.utils import unquote_path
+
+
+class UnquotePathTest(unittest.TestCase):
+ def test_no_quotes(self):
+ assert unquote_path('hello') == 'hello'
+
+ def test_simple_quotes(self):
+ assert unquote_path('"hello"') == 'hello'
+
+ def test_uneven_quotes(self):
+ assert unquote_path('"hello') == '"hello'
+ assert unquote_path('hello"') == 'hello"'
+
+ def test_nested_quotes(self):
+ assert unquote_path('""hello""') == '"hello"'
+ assert unquote_path('"hel"lo"') == 'hel"lo'
+ assert unquote_path('"hello""') == 'hello"'
diff --git a/tests/unit/cli/verbose_proxy_test.py b/tests/unit/cli/verbose_proxy_test.py
new file mode 100644
index 00000000..f77568dc
--- /dev/null
+++ b/tests/unit/cli/verbose_proxy_test.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import six
+
+from compose.cli import verbose_proxy
+from tests import unittest
+
+
+class VerboseProxyTestCase(unittest.TestCase):
+
+ def test_format_call(self):
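+ # repr() of text strings carries a u'' prefix on Python 2 but not Python 3.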
+ prefix = '' if six.PY3 else 'u'
+ expected = "(%(p)s'arg1', True, key=%(p)s'value')" % dict(p=prefix)
+ actual = verbose_proxy.format_call(
+ ("arg1", True),
+ {'key': 'value'})
+
+ self.assertEqual(expected, actual)
+
+ def test_format_return_sequence(self):
+ expected = "(list with 10 items)"
+ actual = verbose_proxy.format_return(list(range(10)), 2)
+ self.assertEqual(expected, actual)
+
+ def test_format_return(self):
+ expected = repr({'Id': 'ok'})
+ actual = verbose_proxy.format_return({'Id': 'ok'}, 2)
+ self.assertEqual(expected, actual)
+
+ def test_format_return_no_result(self):
+ actual = verbose_proxy.format_return(None, 2)
+ self.assertEqual(None, actual)
diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py
new file mode 100644
index 00000000..f9ce240a
--- /dev/null
+++ b/tests/unit/cli_test.py
@@ -0,0 +1,214 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import shutil
+import tempfile
+from io import StringIO
+
+import docker
+import py
+import pytest
+
+from .. import mock
+from .. import unittest
+from ..helpers import build_config
+from compose.cli.command import get_project
+from compose.cli.command import get_project_name
+from compose.cli.docopt_command import NoSuchCommand
+from compose.cli.errors import UserError
+from compose.cli.main import TopLevelCommand
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.project import Project
+
+
+class CLITestCase(unittest.TestCase):
+
+ def test_default_project_name(self):
+ test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
+ with test_dir.as_cwd():
+ project_name = get_project_name('.')
+ self.assertEqual('simplecomposefile', project_name)
+
+ def test_project_name_with_explicit_base_dir(self):
+ base_dir = 'tests/fixtures/simple-composefile'
+ project_name = get_project_name(base_dir)
+ self.assertEqual('simplecomposefile', project_name)
+
+ def test_project_name_with_explicit_uppercase_base_dir(self):
+ base_dir = 'tests/fixtures/UpperCaseDir'
+ project_name = get_project_name(base_dir)
+ self.assertEqual('uppercasedir', project_name)
+
+ def test_project_name_with_explicit_project_name(self):
+ name = 'explicit-project-name'
+ project_name = get_project_name(None, project_name=name)
+ self.assertEqual('explicitprojectname', project_name)
+
+ @mock.patch.dict(os.environ)
+ def test_project_name_from_environment_new_var(self):
+ name = 'namefromenv'
+ os.environ['COMPOSE_PROJECT_NAME'] = name
+ project_name = get_project_name(None)
+ self.assertEqual(project_name, name)
+
+ def test_project_name_with_empty_environment_var(self):
+ base_dir = 'tests/fixtures/simple-composefile'
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_PROJECT_NAME'] = ''
+ project_name = get_project_name(base_dir)
+ self.assertEqual('simplecomposefile', project_name)
+
+ @mock.patch.dict(os.environ)
+ def test_project_name_with_environment_file(self):
+ base_dir = tempfile.mkdtemp()
+ try:
+ name = 'namefromenvfile'
+ with open(os.path.join(base_dir, '.env'), 'w') as f:
+ f.write('COMPOSE_PROJECT_NAME={}'.format(name))
+ project_name = get_project_name(base_dir)
+ assert project_name == name
+
+ # Environment has priority over .env file
+ os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv'
+ assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME']
+ finally:
+ shutil.rmtree(base_dir)
+
+ def test_get_project(self):
+ base_dir = 'tests/fixtures/longer-filename-composefile'
+ project = get_project(base_dir)
+ self.assertEqual(project.name, 'longerfilenamecomposefile')
+ self.assertTrue(project.client)
+ self.assertTrue(project.services)
+
+ def test_command_help(self):
+ with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
+ TopLevelCommand.help({'COMMAND': 'up'})
+
+ assert "Usage: up" in fake_stdout.getvalue()
+
+ def test_command_help_nonexistent(self):
+ with pytest.raises(NoSuchCommand):
+ TopLevelCommand.help({'COMMAND': 'nonexistent'})
+
+ @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
+ @mock.patch('compose.cli.main.RunOperation', autospec=True)
+ @mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
+ def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
+ mock_client = mock.create_autospec(docker.APIClient)
+ project = Project.from_config(
+ name='composetest',
+ client=mock_client,
+ config_data=build_config({
+ 'service': {'image': 'busybox'}
+ }),
+ )
+ command = TopLevelCommand(project)
+
+ with pytest.raises(SystemExit):
+ command.run({
+ 'SERVICE': 'service',
+ 'COMMAND': None,
+ '-e': [],
+ '--user': None,
+ '--no-deps': None,
+ '-d': False,
+ '-T': None,
+ '--entrypoint': None,
+ '--service-ports': None,
+ '--publish': [],
+ '--volume': [],
+ '--rm': None,
+ '--name': None,
+ '--workdir': None,
+ })
+
+ _, _, call_kwargs = mock_run_operation.mock_calls[0]
+ assert call_kwargs['logs'] is False
+
+ def test_run_service_with_restart_always(self):
+ mock_client = mock.create_autospec(docker.APIClient)
+
+ project = Project.from_config(
+ name='composetest',
+ client=mock_client,
+ config_data=build_config({
+ 'service': {
+ 'image': 'busybox',
+ 'restart': 'always',
+ }
+ }),
+ )
+
+ command = TopLevelCommand(project)
+ command.run({
+ 'SERVICE': 'service',
+ 'COMMAND': None,
+ '-e': [],
+ '--user': None,
+ '--no-deps': None,
+ '-d': True,
+ '-T': None,
+ '--entrypoint': None,
+ '--service-ports': None,
+ '--publish': [],
+ '--volume': [],
+ '--rm': None,
+ '--name': None,
+ '--workdir': None,
+ })
+
+ self.assertEqual(
+ mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
+ 'always'
+ )
+
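+ # With --rm, no restart policy should be passed to the engine.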
+ command = TopLevelCommand(project)
+ command.run({
+ 'SERVICE': 'service',
+ 'COMMAND': None,
+ '-e': [],
+ '--user': None,
+ '--no-deps': None,
+ '-d': True,
+ '-T': None,
+ '--entrypoint': None,
+ '--service-ports': None,
+ '--publish': [],
+ '--volume': [],
+ '--rm': True,
+ '--name': None,
+ '--workdir': None,
+ })
+
+ self.assertFalse(
+ mock_client.create_host_config.call_args[1].get('restart_policy')
+ )
+
+ def test_command_manual_and_service_ports_together(self):
+ project = Project.from_config(
+ name='composetest',
+ client=None,
+ config_data=build_config({
+ 'service': {'image': 'busybox'},
+ }),
+ )
+ command = TopLevelCommand(project)
+
+ with self.assertRaises(UserError):
+ command.run({
+ 'SERVICE': 'service',
+ 'COMMAND': None,
+ '-e': [],
+ '--user': None,
+ '--no-deps': None,
+ '-d': True,
+ '-T': None,
+ '--entrypoint': None,
+ '--service-ports': True,
+ '--publish': ['80:80'],
+ '--rm': None,
+ '--name': None,
+ })
diff --git a/tests/unit/config/__init__.py b/tests/unit/config/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/config/__init__.py
diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py
new file mode 100644
index 00000000..8e3d4e2e
--- /dev/null
+++ b/tests/unit/config/config_test.py
@@ -0,0 +1,4482 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import shutil
+import tempfile
+from operator import itemgetter
+
+import py
+import pytest
+import yaml
+
+from ...helpers import build_config_details
+from compose.config import config
+from compose.config import types
+from compose.config.config import resolve_build_args
+from compose.config.config import resolve_environment
+from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
+from compose.config.errors import VERSION_EXPLANATION
+from compose.config.serialize import denormalize_service_dict
+from compose.config.serialize import serialize_config
+from compose.config.serialize import serialize_ns_time_value
+from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import IS_WINDOWS_PLATFORM
+from compose.utils import nanoseconds_from_time_seconds
+from tests import mock
+from tests import unittest
+
+DEFAULT_VERSION = V2_0
+
+
+def make_service_dict(name, service_dict, working_dir, filename=None):
+ """Test helper function to construct a ServiceExtendsResolver
+ """
+ resolver = config.ServiceExtendsResolver(
+ config.ServiceConfig(
+ working_dir=working_dir,
+ filename=filename,
+ name=name,
+ config=service_dict),
+ config.ConfigFile(filename=filename, config={}),
+ environment=Environment.from_env_file(working_dir)
+ )
+ return config.process_service(resolver.run())
+
+
+def service_sort(services):
+ return sorted(services, key=itemgetter('name'))
+
+
+def secret_sort(secrets):
+ return sorted(secrets, key=itemgetter('source'))
+
+
+class ConfigTest(unittest.TestCase):
+
+ def test_load(self):
+ service_dicts = config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox'},
+ 'bar': {'image': 'busybox', 'environment': ['FOO=1']},
+ },
+ 'tests/fixtures/extends',
+ 'common.yml'
+ )
+ ).services
+
+ self.assertEqual(
+ service_sort(service_dicts),
+ service_sort([
+ {
+ 'name': 'bar',
+ 'image': 'busybox',
+ 'environment': {'FOO': '1'},
+ },
+ {
+ 'name': 'foo',
+ 'image': 'busybox',
+ }
+ ])
+ )
+
+ def test_load_v2(self):
+ config_data = config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'foo': {'image': 'busybox'},
+ 'bar': {'image': 'busybox', 'environment': ['FOO=1']},
+ },
+ 'volumes': {
+ 'hello': {
+ 'driver': 'default',
+ 'driver_opts': {'beep': 'boop'}
+ }
+ },
+ 'networks': {
+ 'default': {
+ 'driver': 'bridge',
+ 'driver_opts': {'beep': 'boop'}
+ },
+ 'with_ipam': {
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.28.0.0/16'}
+ ]
+ }
+ },
+ 'internal': {
+ 'driver': 'bridge',
+ 'internal': True
+ }
+ }
+ }, 'working_dir', 'filename.yml')
+ )
+ service_dicts = config_data.services
+ volume_dict = config_data.volumes
+ networks_dict = config_data.networks
+ self.assertEqual(
+ service_sort(service_dicts),
+ service_sort([
+ {
+ 'name': 'bar',
+ 'image': 'busybox',
+ 'environment': {'FOO': '1'},
+ },
+ {
+ 'name': 'foo',
+ 'image': 'busybox',
+ }
+ ])
+ )
+ self.assertEqual(volume_dict, {
+ 'hello': {
+ 'driver': 'default',
+ 'driver_opts': {'beep': 'boop'}
+ }
+ })
+ self.assertEqual(networks_dict, {
+ 'default': {
+ 'driver': 'bridge',
+ 'driver_opts': {'beep': 'boop'}
+ },
+ 'with_ipam': {
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.28.0.0/16'}
+ ]
+ }
+ },
+ 'internal': {
+ 'driver': 'bridge',
+ 'internal': True
+ }
+ })
+
+ def test_valid_versions(self):
+ for version in ['2', '2.0']:
+ cfg = config.load(build_config_details({'version': version}))
+ assert cfg.version == V2_0
+
+ cfg = config.load(build_config_details({'version': '2.1'}))
+ assert cfg.version == V2_1
+
+ cfg = config.load(build_config_details({'version': '2.2'}))
+ assert cfg.version == V2_2
+
+ cfg = config.load(build_config_details({'version': '2.3'}))
+ assert cfg.version == V2_3
+
+ for version in ['3', '3.0']:
+ cfg = config.load(build_config_details({'version': version}))
+ assert cfg.version == V3_0
+
+ cfg = config.load(build_config_details({'version': '3.1'}))
+ assert cfg.version == V3_1
+
+ def test_v1_file_version(self):
+ cfg = config.load(build_config_details({'web': {'image': 'busybox'}}))
+ assert cfg.version == V1
+ assert list(s['name'] for s in cfg.services) == ['web']
+
+ cfg = config.load(build_config_details({'version': {'image': 'busybox'}}))
+ assert cfg.version == V1
+ assert list(s['name'] for s in cfg.services) == ['version']
+
+ def test_wrong_version_type(self):
+ for version in [None, 1, 2, 2.0]:
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'version': version},
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Version in "filename.yml" is invalid - it should be a string.' \
+ in excinfo.exconly()
+
+ def test_unsupported_version(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'version': '2.18'},
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Version in "filename.yml" is unsupported' in excinfo.exconly()
+ assert VERSION_EXPLANATION in excinfo.exconly()
+
+ def test_version_1_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '1',
+ 'web': {'image': 'busybox'},
+ },
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Version in "filename.yml" is invalid' in excinfo.exconly()
+ assert VERSION_EXPLANATION in excinfo.exconly()
+
+ def test_v1_file_with_version_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'web': {'image': 'busybox'},
+ },
+ filename='filename.yml',
+ )
+ )
+
+ assert 'Invalid top-level property "web"' in excinfo.exconly()
+ assert VERSION_EXPLANATION in excinfo.exconly()
+
+ def test_named_volume_config_empty(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'simple': None,
+ 'other': {},
+ }
+ })
+ config_result = config.load(config_details)
+ volumes = config_result.volumes
+ assert 'simple' in volumes
+ assert volumes['simple'] == {}
+ assert volumes['other'] == {}
+
+ def test_named_volume_numeric_driver_opt(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'simple': {'driver_opts': {'size': 42}},
+ }
+ })
+ cfg = config.load(config_details)
+ assert cfg.volumes['simple']['driver_opts']['size'] == '42'
+
+ def test_volume_invalid_driver_opt(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'simple': {'driver_opts': {'size': True}},
+ }
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert 'driver_opts.size contains an invalid type' in exc.exconly()
+
+ def test_named_volume_invalid_type_list(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'volumes': []
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "volume must be a mapping, not an array" in exc.exconly()
+
+ def test_networks_invalid_type_list(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'simple': {'image': 'busybox'}
+ },
+ 'networks': []
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "network must be a mapping, not an array" in exc.exconly()
+
+ def test_load_service_with_name_version(self):
+ with mock.patch('compose.config.config.log') as mock_logging:
+ config_data = config.load(
+ build_config_details({
+ 'version': {
+ 'image': 'busybox'
+ }
+ }, 'working_dir', 'filename.yml')
+ )
+
+ assert 'Unexpected type for "version" key in "filename.yml"' \
+ in mock_logging.warn.call_args[0][0]
+
+ service_dicts = config_data.services
+ self.assertEqual(
+ service_sort(service_dicts),
+ service_sort([
+ {
+ 'name': 'version',
+ 'image': 'busybox',
+ }
+ ])
+ )
+
+ def test_load_throws_error_when_not_dict(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {'web': 'busybox:latest'},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ def test_load_throws_error_when_not_dict_v2(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {'version': '2', 'services': {'web': 'busybox:latest'}},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ def test_load_throws_error_with_invalid_network_fields(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {'web': 'busybox:latest'},
+ 'networks': {
+ 'invalid': {'foo', 'bar'}
+ }
+ }, 'working_dir', 'filename.yml')
+ )
+
+ def test_load_config_link_local_ips_network(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': str(V2_1),
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'networks': {
+ 'foobar': {
+ 'aliases': ['foo', 'bar'],
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+ }
+ },
+ 'networks': {'foobar': {}}
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ web_service = config.load(details).services[0]
+ assert web_service['networks'] == {
+ 'foobar': {
+ 'aliases': ['foo', 'bar'],
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+
+ def test_load_config_service_labels(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'labels': ['label_key=label_val']
+ },
+ 'db': {
+ 'image': 'example/db',
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ service_dicts = config.load(details).services
+ for service in service_dicts:
+ assert service['labels'] == {
+ 'label_key': 'label_val'
+ }
+
+ def test_load_config_volume_and_network_labels(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ },
+ },
+ 'networks': {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ 'volumes': {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ loaded_config = config.load(details)
+
+ assert loaded_config.networks == {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+
+ assert loaded_config.volumes == {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+
+ def test_load_config_invalid_service_names(self):
+ for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {invalid_name: {'image': 'busybox'}}))
+ assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly()
+
+ def test_load_config_invalid_service_names_v2(self):
+ for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'version': '2',
+ 'services': {invalid_name: {'image': 'busybox'}},
+ }))
+ assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly()
+
+ def test_load_with_invalid_field_name(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {'image': 'busybox', 'name': 'bogus'},
+ }
+ },
+ 'working_dir',
+ 'filename.yml',
+ ))
+
+ assert "Unsupported config option for services.web: 'name'" in exc.exconly()
+
+ def test_load_with_invalid_field_name_v1(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {'image': 'busybox', 'name': 'bogus'},
+ },
+ 'working_dir',
+ 'filename.yml',
+ ))
+
+ assert "Unsupported config option for web: 'name'" in exc.exconly()
+
+ def test_load_invalid_service_definition(self):
+ config_details = build_config_details(
+ {'web': 'wrong'},
+ 'working_dir',
+ 'filename.yml')
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "service 'web' must be a mapping not a string." in exc.exconly()
+
+ def test_load_with_empty_build_args(self):
+ config_details = build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'args': None,
+ },
+ },
+ },
+ }
+ )
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert (
+ "services.web.build.args contains an invalid type, it should be an "
+ "object, or an array" in exc.exconly()
+ )
+
+ def test_config_integer_service_name_raise_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {1: {'image': 'busybox'}},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert (
+ "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'" in
+ excinfo.exconly()
+ )
+
+ def test_config_integer_service_name_raise_validation_error_v2(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {1: {'image': 'busybox'}}
+ },
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert (
+ "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'." in
+ excinfo.exconly()
+ )
+
+ def test_config_invalid_service_name_raise_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'test_app': {'build': '.'},
+ 'mong\\o': {'image': 'mongo'},
+ }
+ })
+ )
+
+ assert 'Invalid service name \'mong\\o\'' in excinfo.exconly()
+
+ def test_config_duplicate_cache_from_values_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'test': {'build': {'context': '.', 'cache_from': ['a', 'b', 'a']}}
+ }
+
+ })
+ )
+
+ assert 'build.cache_from contains non-unique items' in exc.exconly()
+
+ def test_load_with_multiple_files_v1(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'web': {
+ 'image': 'example/web',
+ 'links': ['db'],
+ },
+ 'db': {
+ 'image': 'example/db',
+ },
+ })
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'web': {
+ 'build': '/',
+ 'volumes': ['/home/user/project:/code'],
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'build': {'context': os.path.abspath('/')},
+ 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
+ 'links': ['db'],
+ },
+ {
+ 'name': 'db',
+ 'image': 'example/db',
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_with_multiple_files_and_empty_override(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {'web': {'image': 'example/web'}})
+ override_file = config.ConfigFile('override.yml', None)
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ error_msg = "Top level object in 'override.yml' needs to be an object"
+ assert error_msg in exc.exconly()
+
+ def test_load_with_multiple_files_and_empty_override_v2(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {'version': '2', 'services': {'web': {'image': 'example/web'}}})
+ override_file = config.ConfigFile('override.yml', None)
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ error_msg = "Top level object in 'override.yml' needs to be an object"
+ assert error_msg in exc.exconly()
+
+ def test_load_with_multiple_files_and_empty_base(self):
+ base_file = config.ConfigFile('base.yml', None)
+ override_file = config.ConfigFile(
+ 'override.yml',
+ {'web': {'image': 'example/web'}})
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ assert "Top level object in 'base.yml' needs to be an object" in exc.exconly()
+
+ def test_load_with_multiple_files_and_empty_base_v2(self):
+ base_file = config.ConfigFile('base.yml', None)
+ override_file = config.ConfigFile(
+ 'override.yml',
+ {'version': '2', 'services': {'web': {'image': 'example/web'}}}
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ assert "Top level object in 'base.yml' needs to be an object" in exc.exconly()
+
+ def test_load_with_multiple_files_and_extends_in_override_file(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'web': {'image': 'example/web'},
+ })
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'web': {
+ 'extends': {
+ 'file': 'common.yml',
+ 'service': 'base',
+ },
+ 'volumes': ['/home/user/project:/code'],
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ tmpdir = py.test.ensuretemp('config_test')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('common.yml').write("""
+ base:
+ labels: ['label=one']
+ """)
+ with tmpdir.as_cwd():
+ service_dicts = config.load(details).services
+
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
+ 'labels': {'label': 'one'},
+ },
+ ]
+ self.assertEqual(service_sort(service_dicts), service_sort(expected))
+
+ def test_load_mixed_extends_resolution(self):
+ main_file = config.ConfigFile(
+ 'main.yml', {
+ 'version': '2.2',
+ 'services': {
+ 'prodweb': {
+ 'extends': {
+ 'service': 'web',
+ 'file': 'base.yml'
+ },
+ 'environment': {'PROD': 'true'},
+ },
+ },
+ }
+ )
+
+ tmpdir = pytest.ensuretemp('config_test')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('base.yml').write("""
+ version: '2.2'
+ services:
+ base:
+ image: base
+ web:
+ extends: base
+ """)
+
+ details = config.ConfigDetails('.', [main_file])
+ with tmpdir.as_cwd():
+ service_dicts = config.load(details).services
+ assert service_dicts[0] == {
+ 'name': 'prodweb',
+ 'image': 'base',
+ 'environment': {'PROD': 'true'},
+ }
+
+ def test_load_with_multiple_files_and_invalid_override(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {'web': {'image': 'example/web'}})
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {'bogus': 'thing'})
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(details)
+ assert "service 'bogus' must be a mapping not a string." in exc.exconly()
+ assert "In file 'override.yaml'" in exc.exconly()
+
+ def test_load_sorts_in_dependency_order(self):
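+ # web links to db and db mounts volumes from volume, so the load
+ # order must be volume, db, web.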
+ config_details = build_config_details({
+ 'web': {
+ 'image': 'busybox:latest',
+ 'links': ['db'],
+ },
+ 'db': {
+ 'image': 'busybox:latest',
+ 'volumes_from': ['volume:ro']
+ },
+ 'volume': {
+ 'image': 'busybox:latest',
+ 'volumes': ['/tmp'],
+ }
+ })
+ services = config.load(config_details).services
+
+ assert services[0]['name'] == 'volume'
+ assert services[1]['name'] == 'db'
+ assert services[2]['name'] == 'web'
+
+ def test_load_with_extensions(self):
+ config_details = build_config_details({
+ 'version': '2.3',
+ 'x-data': {
+ 'lambda': 3,
+ 'excess': [True, {}]
+ }
+ })
+
+ config_data = config.load(config_details)
+ assert config_data.services == []
+
+ def test_config_build_configuration(self):
+ service = config.load(
+ build_config_details(
+ {'web': {
+ 'build': '.',
+ 'dockerfile': 'Dockerfile-alt'
+ }},
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services
+ self.assertTrue('context' in service[0]['build'])
+ self.assertEqual(service[0]['build']['dockerfile'], 'Dockerfile-alt')
+
+ def test_config_build_configuration_v2(self):
+ # service.dockerfile is invalid in v2
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': '.',
+ 'dockerfile': 'Dockerfile-alt'
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ service = config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': '.'
+ }
+ }
+ }, 'tests/fixtures/extends', 'filename.yml')
+ ).services[0]
+ self.assertTrue('context' in service['build'])
+
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt'
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services
+ self.assertTrue('context' in service[0]['build'])
+ self.assertEqual(service[0]['build']['dockerfile'], 'Dockerfile-alt')
+
+ def test_load_with_buildargs(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'args': {
+ 'opt1': 42,
+ 'opt2': 'foobar'
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'args' in service['build']
+ assert 'opt1' in service['build']['args']
+ assert isinstance(service['build']['args']['opt1'], str)
+ assert service['build']['args']['opt1'] == '42'
+ assert service['build']['args']['opt2'] == 'foobar'
+
+ def test_load_build_labels_dict(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': str(V3_3),
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'labels': {
+ 'label1': 42,
+ 'label2': 'foobar'
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'labels' in service['build']
+ assert 'label1' in service['build']['labels']
+ assert service['build']['labels']['label1'] == 42
+ assert service['build']['labels']['label2'] == 'foobar'
+
+ def test_load_build_labels_list(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'labels': ['foo=bar', 'baz=true', 'foobar=1']
+ },
+ },
+ },
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ service = config.load(details).services[0]
+ assert service['build']['labels'] == {
+ 'foo': 'bar', 'baz': 'true', 'foobar': '1'
+ }
+
+ def test_build_args_allow_empty_properties(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'args': {
+ 'foo': None
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'args' in service['build']
+ assert 'foo' in service['build']['args']
+ assert service['build']['args']['foo'] == ''
+
+ # If a build argument is None it is converted to the empty string.
+ # Make sure that the integer zero is kept as-is, i.e. not converted
+ # to the empty string.
+ def test_build_args_check_zero_preserved(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'args': {
+ 'foo': 0
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'args' in service['build']
+ assert 'foo' in service['build']['args']
+ assert service['build']['args']['foo'] == '0'
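+ # Together with test_load_with_buildargs above, this pins down the
+ # build-arg coercion rules: values are stringified (42 -> '42',
+ # 0 -> '0') and only None becomes the empty string.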
+
+ def test_load_with_multiple_files_mismatched_networks_format(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'networks': {
+ 'foobar': {'aliases': ['foo', 'bar']}
+ }
+ }
+ },
+ 'networks': {'foobar': {}, 'baz': {}}
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'networks': ['baz']
+ }
+ }
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file, override_file])
+ web_service = config.load(details).services[0]
+ assert web_service['networks'] == {
+ 'foobar': {'aliases': ['foo', 'bar']},
+ 'baz': None
+ }
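+ # List-form networks from the override merge into the base dict
+ # form; entries that only appear as list items carry no per-network
+ # options, hence 'baz' maps to None.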
+
+ def test_load_with_multiple_files_v2(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'depends_on': ['db'],
+ },
+ 'db': {
+ 'image': 'example/db',
+ }
+ },
+ })
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'build': '/',
+ 'volumes': ['/home/user/project:/code'],
+ 'depends_on': ['other'],
+ },
+ 'other': {
+ 'image': 'example/other',
+ }
+ }
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'build': {'context': os.path.abspath('/')},
+ 'image': 'example/web',
+ 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
+ 'depends_on': {
+ 'db': {'condition': 'service_started'},
+ 'other': {'condition': 'service_started'},
+ },
+ },
+ {
+ 'name': 'db',
+ 'image': 'example/db',
+ },
+ {
+ 'name': 'other',
+ 'image': 'example/other',
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
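+ # depends_on entries from both files are unioned and normalized to
+ # the long form with a 'service_started' condition.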
+
+ @mock.patch.dict(os.environ)
+ def test_load_with_multiple_files_v3_2(self):
+ os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true'
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'volumes': [
+ {'source': '/a', 'target': '/b', 'type': 'bind'},
+ {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True}
+ ]
+ }
+ },
+ 'volumes': {'vol': {}}
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'volumes': ['/c:/b', '/anonymous']
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes'])
+ assert sorted(svc_volumes) == sorted(
+ ['/anonymous', '/c:/b:rw', 'vol:/x:ro']
+ )
+
+ @mock.patch.dict(os.environ)
+ def test_volume_mode_override(self):
+ os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true'
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'volumes': ['/c:/b:rw']
+ }
+ },
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'volumes': ['/c:/b:ro']
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ svc_volumes = list(map(lambda v: v.repr(), service_dicts[0]['volumes']))
+ assert svc_volumes == ['/c:/b:ro']
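+ # An override volume with the same container path replaces the base
+ # mount entirely, so the access mode flips from rw to ro instead of
+ # producing two mounts.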
+
+ def test_undeclared_volume_v2(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': ['data0028:/data:ro'],
+ },
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ with self.assertRaises(ConfigurationError):
+ config.load(details)
+
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': ['./data0028:/data:ro'],
+ },
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(details)
+ volume = config_data.services[0].get('volumes')[0]
+ assert not volume.is_named_volume
+
+ def test_undeclared_volume_v1(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': ['data0028:/data:ro'],
+ },
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(details)
+ volume = config_data.services[0].get('volumes')[0]
+ assert volume.external == 'data0028'
+ assert volume.is_named_volume
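+ # Contrast with test_undeclared_volume_v2: a v1 file has no
+ # top-level 'volumes' section, so a named volume is implicitly
+ # treated as external instead of raising a ConfigurationError.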
+
+ def test_config_valid_service_names(self):
+ for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
+ services = config.load(
+ build_config_details(
+ {valid_name: {'image': 'busybox'}},
+ 'tests/fixtures/extends',
+ 'common.yml')).services
+ assert services[0]['name'] == valid_name
+
+ def test_config_hint(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'privilige': 'something'},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "(did you mean 'privileged'?)" in excinfo.exconly()
+
+ def test_load_errors_on_uppercase_with_no_image(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'Foo': {'build': '.'},
+ }, 'tests/fixtures/build-ctx'))
+ assert "Service 'Foo' contains uppercase characters" in exc.exconly()
+
+ def test_invalid_config_v1(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 1},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo.image contains an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_invalid_config_v2(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {
+ 'foo': {'image': 1},
+ },
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "services.foo.image contains an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_invalid_config_build_and_image_specified_v1(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'build': '.'},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo has both an image and build path specified." in excinfo.exconly()
+
+ def test_invalid_config_type_should_be_an_array(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'links': 'an_link'},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo.links contains an invalid type, it should be an array" \
+ in excinfo.exconly()
+
+ def test_invalid_config_not_a_dictionary(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ ['foo', 'lol'],
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "Top level object in 'filename.yml' needs to be an object" \
+ in excinfo.exconly()
+
+ def test_invalid_config_not_unique_items(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'build': '.', 'devices': ['/dev/foo:/dev/foo', '/dev/foo:/dev/foo']}
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "has non-unique elements" in excinfo.exconly()
+
+ def test_invalid_list_of_strings_format(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'build': '.', 'command': [1]}
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.command contains 1, which is an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_load_config_dockerfile_without_build_raises_error_v1(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'dockerfile': 'Dockerfile.alt'
+ }
+ }))
+
+ assert "web has both an image and alternate Dockerfile." in exc.exconly()
+
+ def test_config_extra_hosts_string_raises_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'extra_hosts': 'somehost:162.242.195.82'
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extra_hosts contains an invalid type" \
+ in excinfo.exconly()
+
+ def test_config_extra_hosts_list_of_dicts_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'extra_hosts': [
+ {'somehost': '162.242.195.82'},
+ {'otherhost': '50.31.209.229'}
+ ]
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extra_hosts contains {\"somehost\": \"162.242.195.82\"}, " \
+ "which is an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_config_ulimits_invalid_keys_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'ulimits': {
+ 'nofile': {
+ "not_soft_or_hard": 100,
+ "soft": 10000,
+ "hard": 20000,
+ }
+ }
+ }
+ },
+ 'working_dir',
+ 'filename.yml'))
+
+ assert "web.ulimits.nofile contains unsupported option: 'not_soft_or_hard'" \
+ in exc.exconly()
+
+ def test_config_ulimits_required_keys_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'ulimits': {'nofile': {"soft": 10000}}
+ }
+ },
+ 'working_dir',
+ 'filename.yml'))
+ assert "web.ulimits.nofile" in exc.exconly()
+ assert "'hard' is a required property" in exc.exconly()
+
+ def test_config_ulimits_soft_greater_than_hard_error(self):
+ expected = "'soft' value can not be greater than 'hard' value"
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'ulimits': {
+ 'nofile': {"soft": 10000, "hard": 1000}
+ }
+ }
+ },
+ 'working_dir',
+ 'filename.yml'))
+ assert expected in exc.exconly()
+
+ def test_valid_config_which_allows_two_type_definitions(self):
+ expose_values = [["8000"], [8000]]
+ for expose in expose_values:
+ service = config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'expose': expose
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ ).services
+ self.assertEqual(service[0]['expose'], expose)
+
+ def test_valid_config_oneof_string_or_list(self):
+ entrypoint_values = [["sh"], "sh"]
+ for entrypoint in entrypoint_values:
+ service = config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'entrypoint': entrypoint
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ ).services
+ self.assertEqual(service[0]['entrypoint'], entrypoint)
+
+ def test_logs_warning_for_boolean_in_environment(self):
+ config_details = build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'environment': {'SHOW_STUFF': True}
+ }
+ })
+
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+
+ assert "contains true, which is an invalid type" in exc.exconly()
+
+ def test_config_valid_environment_dict_key_contains_dashes(self):
+ services = config.load(
+ build_config_details(
+ {'web': {
+ 'image': 'busybox',
+ 'environment': {'SPRING_JPA_HIBERNATE_DDL-AUTO': 'none'}
+ }},
+ 'working_dir',
+ 'filename.yml'
+ )
+ ).services
+ self.assertEqual(services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'], 'none')
+
+ def test_load_yaml_with_yaml_error(self):
+ tmpdir = py.test.ensuretemp('invalid_yaml_test')
+ self.addCleanup(tmpdir.remove)
+ invalid_yaml_file = tmpdir.join('docker-compose.yml')
+ invalid_yaml_file.write("""
+ web:
+ this is bogus: ok: what
+ """)
+ with pytest.raises(ConfigurationError) as exc:
+ config.load_yaml(str(invalid_yaml_file))
+
+ assert 'line 3, column 32' in exc.exconly()
+
+ def test_validate_extra_hosts_invalid(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'web': {
+ 'image': 'alpine',
+ 'extra_hosts': "www.example.com: 192.168.0.17",
+ }
+ }))
+ assert "web.extra_hosts contains an invalid type" in exc.exconly()
+
+ def test_validate_extra_hosts_invalid_list(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'web': {
+ 'image': 'alpine',
+ 'extra_hosts': [
+ {'www.example.com': '192.168.0.17'},
+ {'api.example.com': '192.168.0.18'}
+ ],
+ }
+ }))
+ assert "which is an invalid type" in exc.exconly()
+
+ def test_normalize_dns_options(self):
+ actual = config.load(build_config_details({
+ 'web': {
+ 'image': 'alpine',
+ 'dns': '8.8.8.8',
+ 'dns_search': 'domain.local',
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'dns': ['8.8.8.8'],
+ 'dns_search': ['domain.local'],
+ }
+ ]
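+ # Scalar 'dns' and 'dns_search' values are normalized to
+ # single-element lists, as is 'tmpfs' in the next test.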
+
+ def test_tmpfs_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'tmpfs': '/run',
+ }
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'tmpfs': ['/run'],
+ }
+ ]
+
+ def test_oom_score_adj_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'oom_score_adj': 500
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'oom_score_adj': 500
+ }
+ ]
+
+ def test_swappiness_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'mem_swappiness': 10,
+ }
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'mem_swappiness': 10,
+ }
+ ]
+
+ def test_group_add_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'group_add': ["docker", 777]
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'group_add': ["docker", 777]
+ }
+ ]
+
+ def test_dns_opt_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'dns_opt': ["use-vc", "no-tld-query"]
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'dns_opt': ["use-vc", "no-tld-query"]
+ }
+ ]
+
+ def test_isolation_option(self):
+ actual = config.load(build_config_details({
+ 'version': str(V2_1),
+ 'services': {
+ 'web': {
+ 'image': 'win10',
+ 'isolation': 'hyperv'
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'win10',
+ 'isolation': 'hyperv',
+ }
+ ]
+
+ def test_merge_service_dicts_from_files_with_extends_in_base(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'app'}
+ }
+ override = {
+ 'image': 'alpine:edge',
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'app'}
+ }
+
+ def test_merge_service_dicts_from_files_with_extends_in_override(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'app'}
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'extends': {'service': 'foo'}
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'extends': {'service': 'foo'}
+ }
+
+ def test_merge_service_dicts_heterogeneous(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'ports': ['5432']
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': [5432]
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'ports': types.ServicePort.parse('5432')
+ }
+
+ def test_merge_service_dicts_heterogeneous_2(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'ports': [5432]
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': ['5432']
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'ports': types.ServicePort.parse('5432')
+ }
+
+ def test_merge_service_dicts_ports_sorting(self):
+ base = {
+ 'ports': [5432]
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': ['5432/udp']
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert len(actual['ports']) == 2
+ assert types.ServicePort.parse('5432')[0] in actual['ports']
+ assert types.ServicePort.parse('5432/udp')[0] in actual['ports']
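+ # Ports that differ only in protocol ('5432' vs '5432/udp') are
+ # distinct mappings, so the merge keeps both rather than overriding.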
+
+ def test_merge_service_dicts_heterogeneous_volumes(self):
+ base = {
+ 'volumes': ['/a:/b', '/x:/z'],
+ }
+
+ override = {
+ 'image': 'alpine:edge',
+ 'volumes': [
+ {'source': '/e', 'target': '/b', 'type': 'bind'},
+ {'source': '/c', 'target': '/d', 'type': 'bind'}
+ ]
+ }
+
+ actual = config.merge_service_dicts_from_files(
+ base, override, V3_2
+ )
+
+ assert actual['volumes'] == [
+ {'source': '/e', 'target': '/b', 'type': 'bind'},
+ {'source': '/c', 'target': '/d', 'type': 'bind'},
+ '/x:/z'
+ ]
+
+ def test_merge_logging_v1(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'log_driver': 'something',
+ 'log_opt': {'foo': 'three'},
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'command': 'true',
+ }
+ actual = config.merge_service_dicts(base, override, V1)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'log_driver': 'something',
+ 'log_opt': {'foo': 'three'},
+ 'command': 'true',
+ }
+
+ def test_merge_logging_v2(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_override_driver(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_base_driver(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_drivers(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_override_options(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'syslog'
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ }
+ }
+
+ def test_merge_logging_v2_no_base(self):
+ base = {
+ 'image': 'alpine:edge'
+ }
+ override = {
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_override(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+ override = {}
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
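+ # Taken together, the merge_logging tests exercise the rule that
+ # options are merged key by key when the drivers match or are
+ # partially omitted, while an override with a different driver
+ # replaces the base options wholesale.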
+
+ def test_merge_mixed_ports(self):
+ base = {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': [
+ {
+ 'target': '1245',
+ 'published': '1245',
+ 'protocol': 'udp',
+ }
+ ]
+ }
+
+ override = {
+ 'ports': ['1245:1245/udp']
+ }
+
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert actual == {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': [types.ServicePort('1245', '1245', 'udp', None, None)]
+ }
+
+ def test_merge_depends_on_no_override(self):
+ base = {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'}
+ }
+ }
+ override = {}
+ actual = config.merge_service_dicts(base, override, V2_1)
+ assert actual == base
+
+ def test_merge_depends_on_mixed_syntax(self):
+ base = {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'}
+ }
+ }
+ override = {
+ 'depends_on': ['app3']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_1)
+ assert actual == {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'},
+ 'app3': {'condition': 'service_started'}
+ }
+ }
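+ # The short list syntax is normalized to the long form (condition
+ # 'service_started') before being merged with the base dict.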
+
+ def test_empty_environment_key_allowed(self):
+ service_dict = config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'build': '.',
+ 'environment': {
+ 'POSTGRES_PASSWORD': ''
+ },
+ },
+ },
+ '.',
+ None,
+ )
+ ).services[0]
+ self.assertEqual(service_dict['environment']['POSTGRES_PASSWORD'], '')
+
+ def test_merge_pid(self):
+ # Regression: https://github.com/docker/compose/issues/4184
+ base = {
+ 'image': 'busybox',
+ 'pid': 'host'
+ }
+
+ override = {
+ 'labels': {'com.docker.compose.test': 'yes'}
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'busybox',
+ 'pid': 'host',
+ 'labels': {'com.docker.compose.test': 'yes'}
+ }
+
+ def test_merge_different_secrets(self):
+ base = {
+ 'image': 'busybox',
+ 'secrets': [
+ {'source': 'src.txt'}
+ ]
+ }
+ override = {'secrets': ['other-src.txt']}
+
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert secret_sort(actual['secrets']) == secret_sort([
+ {'source': 'src.txt'},
+ {'source': 'other-src.txt'}
+ ])
+
+ def test_merge_secrets_override(self):
+ base = {
+ 'image': 'busybox',
+ 'secrets': ['src.txt'],
+ }
+ override = {
+ 'secrets': [
+ {
+ 'source': 'src.txt',
+ 'target': 'data.txt',
+ 'mode': 0o400
+ }
+ ]
+ }
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert actual['secrets'] == override['secrets']
+
+ def test_merge_different_configs(self):
+ base = {
+ 'image': 'busybox',
+ 'configs': [
+ {'source': 'src.txt'}
+ ]
+ }
+ override = {'configs': ['other-src.txt']}
+
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert secret_sort(actual['configs']) == secret_sort([
+ {'source': 'src.txt'},
+ {'source': 'other-src.txt'}
+ ])
+
+ def test_merge_configs_override(self):
+ base = {
+ 'image': 'busybox',
+ 'configs': ['src.txt'],
+ }
+ override = {
+ 'configs': [
+ {
+ 'source': 'src.txt',
+ 'target': 'data.txt',
+ 'mode': 0o400
+ }
+ ]
+ }
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert actual['configs'] == override['configs']
+
+ def test_merge_deploy(self):
+ base = {
+ 'image': 'busybox',
+ }
+ override = {
+ 'deploy': {
+ 'mode': 'global',
+ 'restart_policy': {
+ 'condition': 'on-failure'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V3_0)
+ assert actual['deploy'] == override['deploy']
+
+ def test_merge_deploy_override(self):
+ base = {
+ 'image': 'busybox',
+ 'deploy': {
+ 'mode': 'global',
+ 'restart_policy': {
+ 'condition': 'on-failure'
+ },
+ 'placement': {
+ 'constraints': [
+ 'node.role == manager'
+ ]
+ }
+ }
+ }
+ override = {
+ 'deploy': {
+ 'mode': 'replicated',
+ 'restart_policy': {
+ 'condition': 'any'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V3_0)
+ assert actual['deploy'] == {
+ 'mode': 'replicated',
+ 'restart_policy': {
+ 'condition': 'any'
+ },
+ 'placement': {
+ 'constraints': [
+ 'node.role == manager'
+ ]
+ }
+ }
+
+ def test_merge_credential_spec(self):
+ base = {
+ 'image': 'bb',
+ 'credential_spec': {
+ 'file': '/hello-world',
+ }
+ }
+
+ override = {
+ 'credential_spec': {
+ 'registry': 'revolution.com',
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert actual['credential_spec'] == override['credential_spec']
+
+ def test_merge_scale(self):
+ base = {
+ 'image': 'bar',
+ 'scale': 2,
+ }
+
+ override = {
+ 'scale': 4,
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_2)
+ assert actual == {'image': 'bar', 'scale': 4}
+
+ def test_merge_blkio_config(self):
+ base = {
+ 'image': 'bar',
+ 'blkio_config': {
+ 'weight': 300,
+ 'weight_device': [
+ {'path': '/dev/sda1', 'weight': 200}
+ ],
+ 'device_read_iops': [
+ {'path': '/dev/sda1', 'rate': 300}
+ ],
+ 'device_write_iops': [
+ {'path': '/dev/sda1', 'rate': 1000}
+ ]
+ }
+ }
+
+ override = {
+ 'blkio_config': {
+ 'weight': 450,
+ 'weight_device': [
+ {'path': '/dev/sda2', 'weight': 400}
+ ],
+ 'device_read_iops': [
+ {'path': '/dev/sda1', 'rate': 2000}
+ ],
+ 'device_read_bps': [
+ {'path': '/dev/sda1', 'rate': 1024}
+ ]
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_2)
+ assert actual == {
+ 'image': 'bar',
+ 'blkio_config': {
+ 'weight': override['blkio_config']['weight'],
+ 'weight_device': (
+ base['blkio_config']['weight_device'] +
+ override['blkio_config']['weight_device']
+ ),
+ 'device_read_iops': override['blkio_config']['device_read_iops'],
+ 'device_read_bps': override['blkio_config']['device_read_bps'],
+ 'device_write_iops': base['blkio_config']['device_write_iops']
+ }
+ }
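+ # The data above is consistent with blkio device lists being merged
+ # per device path: distinct paths are kept side by side
+ # ('weight_device'), an entry for the same path is overridden
+ # ('device_read_iops'), and the scalar 'weight' takes the override.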
+
+ def test_merge_extra_hosts(self):
+ base = {
+ 'image': 'bar',
+ 'extra_hosts': {
+ 'foo': '1.2.3.4',
+ }
+ }
+
+ override = {
+ 'extra_hosts': ['bar:5.6.7.8', 'foo:127.0.0.1']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual['extra_hosts'] == {
+ 'foo': '127.0.0.1',
+ 'bar': '5.6.7.8',
+ }
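+ # Dict and list forms of extra_hosts are both normalized to a dict
+ # before merging, with the override winning per host name.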
+
+ def test_merge_healthcheck_config(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'test': ['true']
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'interval': 5000,
+ 'timeout': 10000,
+ 'test': ['echo', 'OK'],
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == {
+ 'start_period': base['healthcheck']['start_period'],
+ 'test': override['healthcheck']['test'],
+ 'interval': override['healthcheck']['interval'],
+ 'timeout': override['healthcheck']['timeout'],
+ }
+
+ def test_merge_healthcheck_override_disables(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'timeout': 2000,
+ 'retries': 3,
+ 'test': ['true']
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'disabled': True
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == {'disabled': True}
+
+ def test_merge_healthcheck_override_enables(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'disabled': True
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'disabled': False,
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'timeout': 2000,
+ 'retries': 3,
+ 'test': ['true']
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == override['healthcheck']
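+ # Together with test_merge_healthcheck_override_disables: a
+ # 'disabled: True' override discards the base healthcheck, and a
+ # full override likewise replaces a disabled base, rather than
+ # merging key by key as in test_merge_healthcheck_config.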
+
+ def test_external_volume_config(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'bogus': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'ext': {'external': True},
+ 'ext2': {'external': {'name': 'aliased'}}
+ }
+ })
+ config_result = config.load(config_details)
+ volumes = config_result.volumes
+ assert 'ext' in volumes
+ assert volumes['ext']['external'] is True
+ assert 'ext2' in volumes
+ assert volumes['ext2']['external']['name'] == 'aliased'
+
+ def test_external_volume_invalid_config(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'bogus': {'image': 'busybox'}
+ },
+ 'volumes': {
+ 'ext': {'external': True, 'driver': 'foo'}
+ }
+ })
+ with pytest.raises(ConfigurationError):
+ config.load(config_details)
+
+ def test_depends_on_orders_services(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'one': {'image': 'busybox', 'depends_on': ['three', 'two']},
+ 'two': {'image': 'busybox', 'depends_on': ['three']},
+ 'three': {'image': 'busybox'},
+ },
+ })
+ actual = config.load(config_details)
+ assert (
+ [service['name'] for service in actual.services] ==
+ ['three', 'two', 'one']
+ )
+
+ def test_depends_on_unknown_service_errors(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'one': {'image': 'busybox', 'depends_on': ['three']},
+ },
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert "Service 'one' depends on service 'three'" in exc.exconly()
+
+ def test_linked_service_is_undefined(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {'image': 'busybox', 'links': ['db:db']},
+ },
+ })
+ )
+
+ def test_load_dockerfile_without_context(self):
+ config_details = build_config_details({
+ 'version': '2',
+ 'services': {
+ 'one': {'build': {'dockerfile': 'Dockerfile.foo'}},
+ },
+ })
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+ assert 'has neither an image nor a build context' in exc.exconly()
+
+ def test_load_secrets(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'secrets': [
+ 'one',
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ 'secrets': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ details = config.ConfigDetails('.', [base_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'secrets': [
+ types.ServiceSecret('one', None, None, None, None),
+ types.ServiceSecret('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_secrets_multi_file(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'secrets': ['one'],
+ },
+ },
+ 'secrets': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ override_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.1',
+ 'services': {
+ 'web': {
+ 'secrets': [
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'secrets': [
+ types.ServiceSecret('one', None, None, None, None),
+ types.ServiceSecret('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_configs(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'configs': [
+ 'one',
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ 'configs': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ details = config.ConfigDetails('.', [base_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'configs': [
+ types.ServiceConfig('one', None, None, None, None),
+ types.ServiceConfig('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_configs_multi_file(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'configs': ['one'],
+ },
+ },
+ 'configs': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ override_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.3',
+ 'services': {
+ 'web': {
+ 'configs': [
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'configs': [
+ types.ServiceConfig('one', None, None, None, None),
+ types.ServiceConfig('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+
+class NetworkModeTest(unittest.TestCase):
+
+ def test_network_mode_standard(self):
+ config_data = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'network_mode': 'bridge',
+ },
+ },
+ }))
+
+ assert config_data.services[0]['network_mode'] == 'bridge'
+
+ def test_network_mode_standard_v1(self):
+ config_data = config.load(build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'net': 'bridge',
+ },
+ }))
+
+ assert config_data.services[0]['network_mode'] == 'bridge'
+ assert 'net' not in config_data.services[0]
+
+ def test_network_mode_container(self):
+ config_data = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'network_mode': 'container:foo',
+ },
+ },
+ }))
+
+ assert config_data.services[0]['network_mode'] == 'container:foo'
+
+ def test_network_mode_container_v1(self):
+ config_data = config.load(build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'net': 'container:foo',
+ },
+ }))
+
+ assert config_data.services[0]['network_mode'] == 'container:foo'
+
+ def test_network_mode_service(self):
+ config_data = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'network_mode': 'service:foo',
+ },
+ 'foo': {
+ 'image': 'busybox',
+ 'command': "top",
+ },
+ },
+ }))
+
+ assert config_data.services[1]['network_mode'] == 'service:foo'
+
+ def test_network_mode_service_v1(self):
+ config_data = config.load(build_config_details({
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'net': 'container:foo',
+ },
+ 'foo': {
+ 'image': 'busybox',
+ 'command': "top",
+ },
+ }))
+
+ assert config_data.services[1]['network_mode'] == 'service:foo'
+
+ def test_network_mode_service_nonexistent(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'network_mode': 'service:foo',
+ },
+ },
+ }))
+
+ assert "service 'foo' which is undefined" in excinfo.exconly()
+
+ def test_network_mode_plus_networks_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': "top",
+ 'network_mode': 'bridge',
+ 'networks': ['front'],
+ },
+ },
+ 'networks': {
+ 'front': None,
+ }
+ }))
+
+ assert "'network_mode' and 'networks' cannot be combined" in excinfo.exconly()
+
+
+class PortsTest(unittest.TestCase):
+ INVALID_PORTS_TYPES = [
+ {"1": "8000"},
+ False,
+ "8000",
+ 8000,
+ ]
+
+ NON_UNIQUE_SINGLE_PORTS = [
+ ["8000", "8000"],
+ ]
+
+ INVALID_PORT_MAPPINGS = [
+ ["8000-8004:8000-8002"],
+ ["4242:4242-4244"],
+ ]
+
+ VALID_SINGLE_PORTS = [
+ ["8000"],
+ ["8000/tcp"],
+ ["8000", "9000"],
+ [8000],
+ [8000, 9000],
+ ]
+
+ VALID_PORT_MAPPINGS = [
+ ["8000:8050"],
+ ["49153-49154:3002-3003"],
+ ]
+
+ def test_config_invalid_ports_type_validation(self):
+ for invalid_ports in self.INVALID_PORTS_TYPES:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config({'ports': invalid_ports})
+
+ assert "contains an invalid type" in exc.value.msg
+
+ def test_config_non_unique_ports_validation(self):
+ for invalid_ports in self.NON_UNIQUE_SINGLE_PORTS:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config({'ports': invalid_ports})
+
+ assert "non-unique" in exc.value.msg
+
+ def test_config_invalid_ports_format_validation(self):
+ for invalid_ports in self.INVALID_PORT_MAPPINGS:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config({'ports': invalid_ports})
+
+ assert "Port ranges don't match in length" in exc.value.msg
+
+ def test_config_valid_ports_format_validation(self):
+ for valid_ports in self.VALID_SINGLE_PORTS + self.VALID_PORT_MAPPINGS:
+ self.check_config({'ports': valid_ports})
+
+ def test_config_invalid_expose_type_validation(self):
+ for invalid_expose in self.INVALID_PORTS_TYPES:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config({'expose': invalid_expose})
+
+ assert "contains an invalid type" in exc.value.msg
+
+ def test_config_non_unique_expose_validation(self):
+ for invalid_expose in self.NON_UNIQUE_SINGLE_PORTS:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config({'expose': invalid_expose})
+
+ assert "non-unique" in exc.value.msg
+
+ def test_config_invalid_expose_format_validation(self):
+ # Valid port mappings ARE NOT valid 'expose' entries
+ for invalid_expose in self.INVALID_PORT_MAPPINGS + self.VALID_PORT_MAPPINGS:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config({'expose': invalid_expose})
+
+ assert "should be of the format" in exc.value.msg
+
+ def test_config_valid_expose_format_validation(self):
+ # Valid single ports ARE valid 'expose' entries
+ for valid_expose in self.VALID_SINGLE_PORTS:
+ self.check_config({'expose': valid_expose})
+
+ def check_config(self, cfg):
+ config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'web': dict(image='busybox', **cfg)
+ },
+ }, 'working_dir', 'filename.yml')
+ )
+
+
+class InterpolationTest(unittest.TestCase):
+
+ @mock.patch.dict(os.environ)
+ def test_config_file_with_environment_file(self):
+ project_dir = 'tests/fixtures/default-env-file'
+ service_dicts = config.load(
+ config.find(
+ project_dir, None, Environment.from_env_file(project_dir)
+ )
+ ).services
+
+ self.assertEqual(service_dicts[0], {
+ 'name': 'web',
+ 'image': 'alpine:latest',
+ 'ports': [
+ types.ServicePort.parse('5643')[0],
+ types.ServicePort.parse('9999')[0]
+ ],
+ 'command': 'true'
+ })
+
+ @mock.patch.dict(os.environ)
+ def test_config_file_with_environment_variable(self):
+ project_dir = 'tests/fixtures/environment-interpolation'
+ os.environ.update(
+ IMAGE="busybox",
+ HOST_PORT="80",
+ LABEL_VALUE="myvalue",
+ )
+
+ service_dicts = config.load(
+ config.find(
+ project_dir, None, Environment.from_env_file(project_dir)
+ )
+ ).services
+
+ self.assertEqual(service_dicts, [
+ {
+ 'name': 'web',
+ 'image': 'busybox',
+ 'ports': types.ServicePort.parse('80:8000'),
+ 'labels': {'mylabel': 'myvalue'},
+ 'hostname': 'host-',
+ 'command': '${ESCAPED}',
+ }
+ ])
+
+ @mock.patch.dict(os.environ)
+ def test_unset_variable_produces_warning(self):
+ os.environ.pop('FOO', None)
+ os.environ.pop('BAR', None)
+ config_details = build_config_details(
+ {
+ 'web': {
+ 'image': '${FOO}',
+ 'command': '${BAR}',
+ 'container_name': '${BAR}',
+ },
+ },
+ '.',
+ None,
+ )
+
+ with mock.patch('compose.config.environment.log') as log:
+ config.load(config_details)
+
+ self.assertEqual(2, log.warn.call_count)
+ warnings = sorted(args[0][0] for args in log.warn.call_args_list)
+ self.assertIn('BAR', warnings[0])
+ self.assertIn('FOO', warnings[1])
+
+ @mock.patch.dict(os.environ)
+ def test_invalid_interpolation(self):
+ with self.assertRaises(config.ConfigurationError) as cm:
+ config.load(
+ build_config_details(
+ {'web': {'image': '${'}},
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ self.assertIn('Invalid', cm.exception.msg)
+ self.assertIn('for "image" option', cm.exception.msg)
+ self.assertIn('in service "web"', cm.exception.msg)
+ self.assertIn('"${"', cm.exception.msg)
+
+ @mock.patch.dict(os.environ)
+ def test_interpolation_secrets_section(self):
+ os.environ['FOO'] = 'baz.bar'
+ config_dict = config.load(build_config_details({
+ 'version': '3.1',
+ 'secrets': {
+ 'secretdata': {
+ 'external': {'name': '$FOO'}
+ }
+ }
+ }))
+ assert config_dict.secrets == {
+ 'secretdata': {
+ 'external': {'name': 'baz.bar'},
+ 'external_name': 'baz.bar'
+ }
+ }
+
+ @mock.patch.dict(os.environ)
+ def test_interpolation_configs_section(self):
+ os.environ['FOO'] = 'baz.bar'
+ config_dict = config.load(build_config_details({
+ 'version': '3.3',
+ 'configs': {
+ 'configdata': {
+ 'external': {'name': '$FOO'}
+ }
+ }
+ }))
+ assert config_dict.configs == {
+ 'configdata': {
+ 'external': {'name': 'baz.bar'},
+ 'external_name': 'baz.bar'
+ }
+ }
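+ # As with secrets above, an interpolated external name is exposed
+ # both under external['name'] and as a flattened 'external_name'.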
+
+
+class VolumeConfigTest(unittest.TestCase):
+
+ def test_no_binding(self):
+ d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.')
+ self.assertEqual(d['volumes'], ['/data'])
+
+ @mock.patch.dict(os.environ)
+ def test_volume_binding_with_environment_variable(self):
+ os.environ['VOLUME_PATH'] = '/host/path'
+
+ d = config.load(
+ build_config_details(
+ {'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}},
+ '.',
+ None,
+ )
+ ).services[0]
+ self.assertEqual(d['volumes'], [VolumeSpec.parse('/host/path:/container/path')])
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
+ @mock.patch.dict(os.environ)
+ def test_volume_binding_with_home(self):
+ os.environ['HOME'] = '/home/user'
+ d = make_service_dict('foo', {'build': '.', 'volumes': ['~:/container/path']}, working_dir='.')
+ self.assertEqual(d['volumes'], ['/home/user:/container/path'])
+
+ def test_name_does_not_expand(self):
+ d = make_service_dict('foo', {'build': '.', 'volumes': ['mydatavolume:/data']}, working_dir='.')
+ self.assertEqual(d['volumes'], ['mydatavolume:/data'])
+
+ def test_absolute_posix_path_does_not_expand(self):
+ d = make_service_dict('foo', {'build': '.', 'volumes': ['/var/lib/data:/data']}, working_dir='.')
+ self.assertEqual(d['volumes'], ['/var/lib/data:/data'])
+
+ def test_absolute_windows_path_does_not_expand(self):
+ d = make_service_dict('foo', {'build': '.', 'volumes': ['c:\\data:/data']}, working_dir='.')
+ self.assertEqual(d['volumes'], ['c:\\data:/data'])
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
+ def test_relative_path_does_expand_posix(self):
+ d = make_service_dict(
+ 'foo',
+ {'build': '.', 'volumes': ['./data:/data']},
+ working_dir='/home/me/myproject')
+ self.assertEqual(d['volumes'], ['/home/me/myproject/data:/data'])
+
+ d = make_service_dict(
+ 'foo',
+ {'build': '.', 'volumes': ['.:/data']},
+ working_dir='/home/me/myproject')
+ self.assertEqual(d['volumes'], ['/home/me/myproject:/data'])
+
+ d = make_service_dict(
+ 'foo',
+ {'build': '.', 'volumes': ['../otherproject:/data']},
+ working_dir='/home/me/myproject')
+ self.assertEqual(d['volumes'], ['/home/me/otherproject:/data'])
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths')
+ def test_relative_path_does_expand_windows(self):
+ d = make_service_dict(
+ 'foo',
+ {'build': '.', 'volumes': ['./data:/data']},
+ working_dir='c:\\Users\\me\\myproject')
+ self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject\\data:/data'])
+
+ d = make_service_dict(
+ 'foo',
+ {'build': '.', 'volumes': ['.:/data']},
+ working_dir='c:\\Users\\me\\myproject')
+ self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject:/data'])
+
+ d = make_service_dict(
+ 'foo',
+ {'build': '.', 'volumes': ['../otherproject:/data']},
+ working_dir='c:\\Users\\me\\myproject')
+ self.assertEqual(d['volumes'], ['c:\\Users\\me\\otherproject:/data'])
+
+ @mock.patch.dict(os.environ)
+ def test_home_directory_with_driver_does_not_expand(self):
+ os.environ['NAME'] = 'surprise!'
+ d = make_service_dict('foo', {
+ 'build': '.',
+ 'volumes': ['~:/data'],
+ 'volume_driver': 'foodriver',
+ }, working_dir='.')
+ self.assertEqual(d['volumes'], ['~:/data'])
+
+ def test_volume_path_with_non_ascii_directory(self):
+ volume = u'/Füü/data:/data'
+ container_path = config.resolve_volume_path(".", volume)
+ self.assertEqual(container_path, volume)
+
+
+class MergePathMappingTest(object):
+ config_name = ""
+
+ def test_empty(self):
+ service_dict = config.merge_service_dicts({}, {}, DEFAULT_VERSION)
+ assert self.config_name not in service_dict
+
+ def test_no_override(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: ['/foo:/code', '/data']},
+ {},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(['/foo:/code', '/data'])
+
+ def test_no_base(self):
+ service_dict = config.merge_service_dicts(
+ {},
+ {self.config_name: ['/bar:/code']},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(['/bar:/code'])
+
+ def test_override_explicit_path(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: ['/foo:/code', '/data']},
+ {self.config_name: ['/bar:/code']},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(['/bar:/code', '/data'])
+
+ def test_add_explicit_path(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: ['/foo:/code', '/data']},
+ {self.config_name: ['/bar:/code', '/quux:/data']},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(['/bar:/code', '/quux:/data'])
+
+ def test_remove_explicit_path(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: ['/foo:/code', '/quux:/data']},
+ {self.config_name: ['/bar:/code', '/data']},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(['/bar:/code', '/data'])
+
+
+class MergeVolumesTest(unittest.TestCase, MergePathMappingTest):
+ config_name = 'volumes'
+
+
+class MergeDevicesTest(unittest.TestCase, MergePathMappingTest):
+ config_name = 'devices'
+
+
+class BuildOrImageMergeTest(unittest.TestCase):
+
+ def test_merge_build_or_image_no_override(self):
+ self.assertEqual(
+ config.merge_service_dicts({'build': '.'}, {}, V1),
+ {'build': '.'},
+ )
+
+ self.assertEqual(
+ config.merge_service_dicts({'image': 'redis'}, {}, V1),
+ {'image': 'redis'},
+ )
+
+ def test_merge_build_or_image_override_with_same(self):
+ self.assertEqual(
+ config.merge_service_dicts({'build': '.'}, {'build': './web'}, V1),
+ {'build': './web'},
+ )
+
+ self.assertEqual(
+ config.merge_service_dicts({'image': 'redis'}, {'image': 'postgres'}, V1),
+ {'image': 'postgres'},
+ )
+
+ def test_merge_build_or_image_override_with_other(self):
+ self.assertEqual(
+ config.merge_service_dicts({'build': '.'}, {'image': 'redis'}, V1),
+ {'image': 'redis'},
+ )
+
+ self.assertEqual(
+ config.merge_service_dicts({'image': 'redis'}, {'build': '.'}, V1),
+ {'build': '.'}
+ )
+
+
+class MergeListsTest(object):
+ config_name = ""
+ base_config = []
+ override_config = []
+
+ def merged_config(self):
+ return set(self.base_config) | set(self.override_config)
+
+ def test_empty(self):
+ assert self.config_name not in config.merge_service_dicts({}, {}, DEFAULT_VERSION)
+
+ def test_no_override(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: self.base_config},
+ {},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(self.base_config)
+
+ def test_no_base(self):
+ service_dict = config.merge_service_dicts(
+ {},
+ {self.config_name: self.base_config},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(self.base_config)
+
+ def test_add_item(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: self.base_config},
+ {self.config_name: self.override_config},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == set(self.merged_config())
+
+
+class MergePortsTest(unittest.TestCase, MergeListsTest):
+ config_name = 'ports'
+ base_config = ['10:8000', '9000']
+ override_config = ['20:8000']
+
+ def merged_config(self):
+ return self.convert(self.base_config) | self.convert(self.override_config)
+
+ def convert(self, port_config):
+ return set(config.merge_service_dicts(
+ {self.config_name: port_config},
+ {self.config_name: []},
+ DEFAULT_VERSION
+ )[self.config_name])
+
+ def test_duplicate_port_mappings(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: self.base_config},
+ {self.config_name: self.base_config},
+ DEFAULT_VERSION
+ )
+ assert set(service_dict[self.config_name]) == self.convert(self.base_config)
+
+ def test_no_override(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: self.base_config},
+ {},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == self.convert(self.base_config)
+
+ def test_no_base(self):
+ service_dict = config.merge_service_dicts(
+ {},
+ {self.config_name: self.base_config},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == self.convert(self.base_config)
+
+
+class MergeNetworksTest(unittest.TestCase, MergeListsTest):
+ config_name = 'networks'
+ base_config = ['frontend', 'backend']
+ override_config = ['monitoring']
+
+
+class MergeStringsOrListsTest(unittest.TestCase):
+
+ def test_no_override(self):
+ service_dict = config.merge_service_dicts(
+ {'dns': '8.8.8.8'},
+ {},
+ DEFAULT_VERSION)
+ assert set(service_dict['dns']) == set(['8.8.8.8'])
+
+ def test_no_base(self):
+ service_dict = config.merge_service_dicts(
+ {},
+ {'dns': '8.8.8.8'},
+ DEFAULT_VERSION)
+ assert set(service_dict['dns']) == set(['8.8.8.8'])
+
+ def test_add_string(self):
+ service_dict = config.merge_service_dicts(
+ {'dns': ['8.8.8.8']},
+ {'dns': '9.9.9.9'},
+ DEFAULT_VERSION)
+ assert set(service_dict['dns']) == set(['8.8.8.8', '9.9.9.9'])
+
+ def test_add_list(self):
+ service_dict = config.merge_service_dicts(
+ {'dns': '8.8.8.8'},
+ {'dns': ['9.9.9.9']},
+ DEFAULT_VERSION)
+ assert set(service_dict['dns']) == set(['8.8.8.8', '9.9.9.9'])
+
+
+class MergeLabelsTest(unittest.TestCase):
+
+ def test_empty(self):
+ assert 'labels' not in config.merge_service_dicts({}, {}, DEFAULT_VERSION)
+
+ def test_no_override(self):
+ service_dict = config.merge_service_dicts(
+ make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'),
+ make_service_dict('foo', {'build': '.'}, 'tests/'),
+ DEFAULT_VERSION)
+ assert service_dict['labels'] == {'foo': '1', 'bar': ''}
+
+ def test_no_base(self):
+ service_dict = config.merge_service_dicts(
+ make_service_dict('foo', {'build': '.'}, 'tests/'),
+ make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'),
+ DEFAULT_VERSION)
+ assert service_dict['labels'] == {'foo': '2'}
+
+ def test_override_explicit_value(self):
+ service_dict = config.merge_service_dicts(
+ make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'),
+ make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'),
+ DEFAULT_VERSION)
+ assert service_dict['labels'] == {'foo': '2', 'bar': ''}
+
+ def test_add_explicit_value(self):
+ service_dict = config.merge_service_dicts(
+ make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'),
+ make_service_dict('foo', {'build': '.', 'labels': ['bar=2']}, 'tests/'),
+ DEFAULT_VERSION)
+ assert service_dict['labels'] == {'foo': '1', 'bar': '2'}
+
+ def test_remove_explicit_value(self):
+ service_dict = config.merge_service_dicts(
+ make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar=2']}, 'tests/'),
+ make_service_dict('foo', {'build': '.', 'labels': ['bar']}, 'tests/'),
+ DEFAULT_VERSION)
+ assert service_dict['labels'] == {'foo': '1', 'bar': ''}
+
+
+class MergeBuildTest(unittest.TestCase):
+ def test_full(self):
+ base = {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile',
+ 'args': {
+ 'x': '1',
+ 'y': '2',
+ },
+ 'cache_from': ['ubuntu'],
+ 'labels': ['com.docker.compose.test=true']
+ }
+
+ override = {
+ 'context': './prod',
+ 'dockerfile': 'Dockerfile.prod',
+ 'args': ['x=12'],
+ 'cache_from': ['debian'],
+ 'labels': {
+ 'com.docker.compose.test': 'false',
+ 'com.docker.compose.prod': 'true',
+ }
+ }
+
+ result = config.merge_build(None, {'build': base}, {'build': override})
+ assert result['context'] == override['context']
+ assert result['dockerfile'] == override['dockerfile']
+ assert result['args'] == {'x': '12', 'y': '2'}
+ assert set(result['cache_from']) == set(['ubuntu', 'debian'])
+ assert result['labels'] == override['labels']
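+ # 'args' supplied as a list ('x=12') are normalized to a dict before
+ # merging, so the override updates 'x' while the base 'y' survives.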
+
+ def test_empty_override(self):
+ base = {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile',
+ 'args': {
+ 'x': '1',
+ 'y': '2',
+ },
+ 'cache_from': ['ubuntu'],
+ 'labels': {
+ 'com.docker.compose.test': 'true'
+ }
+ }
+
+ override = {}
+
+ result = config.merge_build(None, {'build': base}, {'build': override})
+ assert result == base
+
+ def test_empty_base(self):
+ base = {}
+
+ override = {
+ 'context': './prod',
+ 'dockerfile': 'Dockerfile.prod',
+ 'args': {'x': '12'},
+ 'cache_from': ['debian'],
+ 'labels': {
+ 'com.docker.compose.test': 'false',
+ 'com.docker.compose.prod': 'true',
+ }
+ }
+
+ result = config.merge_build(None, {'build': base}, {'build': override})
+ assert result == override
+
+
+class MemoryOptionsTest(unittest.TestCase):
+
+ def test_validation_fails_with_just_memswap_limit(self):
+ """
+ Setting 'memswap_limit' is invalid config unless 'mem_limit' is
+ also set.
+ """
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'image': 'busybox', 'memswap_limit': 2000000},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "foo.memswap_limit is invalid: when defining " \
+ "'memswap_limit' you must set 'mem_limit' as well" \
+ in excinfo.exconly()
+
+ def test_validation_with_correct_memswap_values(self):
+ service_dict = config.load(
+ build_config_details(
+ {'foo': {'image': 'busybox', 'mem_limit': 1000000, 'memswap_limit': 2000000}},
+ 'tests/fixtures/extends',
+ 'common.yml'
+ )
+ ).services
+ self.assertEqual(service_dict[0]['memswap_limit'], 2000000)
+
+ def test_memswap_can_be_a_string(self):
+ service_dict = config.load(
+ build_config_details(
+ {'foo': {'image': 'busybox', 'mem_limit': "1G", 'memswap_limit': "512M"}},
+ 'tests/fixtures/extends',
+ 'common.yml'
+ )
+ ).services
+ self.assertEqual(service_dict[0]['memswap_limit'], "512M")
+
+
+class EnvTest(unittest.TestCase):
+
+ def test_parse_environment_as_list(self):
+ environment = [
+ 'NORMAL=F1',
+ 'CONTAINS_EQUALS=F=2',
+ 'TRAILING_EQUALS=',
+ ]
+ self.assertEqual(
+ config.parse_environment(environment),
+ {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''},
+ )
+
+ def test_parse_environment_as_dict(self):
+ environment = {
+ 'NORMAL': 'F1',
+ 'CONTAINS_EQUALS': 'F=2',
+ 'TRAILING_EQUALS': None,
+ }
+ self.assertEqual(config.parse_environment(environment), environment)
+
+ def test_parse_environment_invalid(self):
+ with self.assertRaises(ConfigurationError):
+ config.parse_environment('a=b')
+
+ def test_parse_environment_empty(self):
+ self.assertEqual(config.parse_environment(None), {})
+
+ @mock.patch.dict(os.environ)
+ def test_resolve_environment(self):
+ os.environ['FILE_DEF'] = 'E1'
+ os.environ['FILE_DEF_EMPTY'] = 'E2'
+ os.environ['ENV_DEF'] = 'E3'
+
+ service_dict = {
+ 'build': '.',
+ 'environment': {
+ 'FILE_DEF': 'F1',
+ 'FILE_DEF_EMPTY': '',
+ 'ENV_DEF': None,
+ 'NO_DEF': None
+ },
+ }
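+        # A None value means "resolve from the host environment": ENV_DEF
+        # picks up E3 from os.environ, while NO_DEF stays None since it is
+        # defined nowhere.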
+ self.assertEqual(
+ resolve_environment(
+ service_dict, Environment.from_env_file(None)
+ ),
+ {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None},
+ )
+
+ def test_resolve_environment_from_env_file(self):
+ self.assertEqual(
+ resolve_environment({'env_file': ['tests/fixtures/env/one.env']}),
+ {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'},
+ )
+
+ def test_environment_overrides_env_file(self):
+ self.assertEqual(
+ resolve_environment({
+ 'environment': {'FOO': 'baz'},
+ 'env_file': ['tests/fixtures/env/one.env'],
+ }),
+ {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz'},
+ )
+
+ def test_resolve_environment_with_multiple_env_files(self):
+ service_dict = {
+ 'env_file': [
+ 'tests/fixtures/env/one.env',
+ 'tests/fixtures/env/two.env'
+ ]
+ }
+ self.assertEqual(
+ resolve_environment(service_dict),
+ {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'},
+ )
+
+ def test_resolve_environment_nonexistent_file(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details(
+ {'foo': {'image': 'example', 'env_file': 'nonexistent.env'}},
+ working_dir='tests/fixtures/env'))
+
+ assert 'Couldn\'t find env file' in exc.exconly()
+ assert 'nonexistent.env' in exc.exconly()
+
+ @mock.patch.dict(os.environ)
+ def test_resolve_environment_from_env_file_with_empty_values(self):
+ os.environ['FILE_DEF'] = 'E1'
+ os.environ['FILE_DEF_EMPTY'] = 'E2'
+ os.environ['ENV_DEF'] = 'E3'
+ self.assertEqual(
+ resolve_environment(
+ {'env_file': ['tests/fixtures/env/resolve.env']},
+ Environment.from_env_file(None)
+ ),
+ {
+ 'FILE_DEF': u'bär',
+ 'FILE_DEF_EMPTY': '',
+ 'ENV_DEF': 'E3',
+ 'NO_DEF': None
+ },
+ )
+
+ @mock.patch.dict(os.environ)
+ def test_resolve_build_args(self):
+ os.environ['env_arg'] = 'value2'
+
+ build = {
+ 'context': '.',
+ 'args': {
+ 'arg1': 'value1',
+ 'empty_arg': '',
+ 'env_arg': None,
+ 'no_env': None
+ }
+ }
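+        # Build args resolve like service environment: a None value is
+        # looked up in the host environment, and stays None if unset there.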
+ self.assertEqual(
+ resolve_build_args(build['args'], Environment.from_env_file(build['context'])),
+ {'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None},
+ )
+
+ @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+ @mock.patch.dict(os.environ)
+ def test_resolve_path(self):
+ os.environ['HOSTENV'] = '/tmp'
+ os.environ['CONTAINERENV'] = '/host/tmp'
+
+ service_dict = config.load(
+ build_config_details(
+ {'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}},
+ "tests/fixtures/env",
+ )
+ ).services[0]
+ self.assertEqual(
+ set(service_dict['volumes']),
+ set([VolumeSpec.parse('/tmp:/host/tmp')]))
+
+ service_dict = config.load(
+ build_config_details(
+ {'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}},
+ "tests/fixtures/env",
+ )
+ ).services[0]
+ self.assertEqual(
+ set(service_dict['volumes']),
+ set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')]))
+
+
+def load_from_filename(filename, override_dir=None):
+ return config.load(
+ config.find('.', [filename], Environment.from_env_file('.'), override_dir=override_dir)
+ ).services
+
+
+class ExtendsTest(unittest.TestCase):
+
+ def test_extends(self):
+ service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml')
+
+ self.assertEqual(service_sort(service_dicts), service_sort([
+ {
+ 'name': 'mydb',
+ 'image': 'busybox',
+ 'command': 'top',
+ },
+ {
+ 'name': 'myweb',
+ 'image': 'busybox',
+ 'command': 'top',
+ 'network_mode': 'bridge',
+ 'links': ['mydb:db'],
+ 'environment': {
+ "FOO": "1",
+ "BAR": "2",
+ "BAZ": "2",
+ },
+ }
+ ]))
+
+ def test_merging_env_labels_ulimits(self):
+ service_dicts = load_from_filename('tests/fixtures/extends/common-env-labels-ulimits.yml')
+
+ self.assertEqual(service_sort(service_dicts), service_sort([
+ {
+ 'name': 'web',
+ 'image': 'busybox',
+ 'command': '/bin/true',
+ 'network_mode': 'host',
+ 'environment': {
+ "FOO": "2",
+ "BAR": "1",
+ "BAZ": "3",
+ },
+ 'labels': {'label': 'one'},
+ 'ulimits': {'nproc': 65535, 'memlock': {'soft': 1024, 'hard': 2048}}
+ }
+ ]))
+
+ def test_nested(self):
+ service_dicts = load_from_filename('tests/fixtures/extends/nested.yml')
+
+ self.assertEqual(service_dicts, [
+ {
+ 'name': 'myweb',
+ 'image': 'busybox',
+ 'command': '/bin/true',
+ 'network_mode': 'host',
+ 'environment': {
+ "FOO": "2",
+ "BAR": "2",
+ },
+ },
+ ])
+
+ def test_self_referencing_file(self):
+ """
+        We specify a 'file' key that points at the file we're already in.
+ """
+ service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml')
+ self.assertEqual(service_sort(service_dicts), service_sort([
+            {
+                'environment': {'YEP': '1', 'BAR': '1', 'BAZ': '3'},
+                'image': 'busybox',
+                'name': 'myweb'
+            },
+            {
+                'environment': {'YEP': '1'},
+                'image': 'busybox',
+                'name': 'otherweb'
+            },
+            {
+                'environment': {'YEP': '1', 'BAZ': '3'},
+                'image': 'busybox',
+                'name': 'web'
+            }
+ ]))
+
+ def test_circular(self):
+ with pytest.raises(config.CircularReference) as exc:
+ load_from_filename('tests/fixtures/extends/circle-1.yml')
+
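+        # exc.value.trail records each (filename, service) visited while
+        # resolving extends; the repeated ('circle-1.yml', 'web') entry is
+        # where the cycle closes.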
+ path = [
+ (os.path.basename(filename), service_name)
+ for (filename, service_name) in exc.value.trail
+ ]
+ expected = [
+ ('circle-1.yml', 'web'),
+ ('circle-2.yml', 'other'),
+ ('circle-1.yml', 'web'),
+ ]
+ self.assertEqual(path, expected)
+
+ def test_extends_validation_empty_dictionary(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'image': 'busybox', 'extends': {}},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert 'service' in excinfo.exconly()
+
+ def test_extends_validation_missing_service_key(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {'image': 'busybox', 'extends': {'file': 'common.yml'}},
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "'service' is a required property" in excinfo.exconly()
+
+ def test_extends_validation_invalid_key(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'extends': {
+ 'file': 'common.yml',
+ 'service': 'web',
+ 'rogue_key': 'is not allowed'
+ }
+ },
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extends contains unsupported option: 'rogue_key'" \
+ in excinfo.exconly()
+
+ def test_extends_validation_sub_property_key(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'image': 'busybox',
+ 'extends': {
+ 'file': 1,
+ 'service': 'web',
+ }
+ },
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ )
+
+ assert "web.extends.file contains 1, which is an invalid type, it should be a string" \
+ in excinfo.exconly()
+
+ def test_extends_validation_no_file_key_no_filename_set(self):
+ dictionary = {'extends': {'service': 'web'}}
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends')
+
+ assert 'file' in excinfo.exconly()
+
+ def test_extends_validation_valid_config(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'web': {'image': 'busybox', 'extends': {'service': 'web', 'file': 'common.yml'}},
+ },
+ 'tests/fixtures/extends',
+ 'common.yml'
+ )
+ ).services
+
+ self.assertEqual(len(service), 1)
+ self.assertIsInstance(service[0], dict)
+ self.assertEqual(service[0]['command'], "/bin/true")
+
+ def test_extended_service_with_invalid_config(self):
+ with pytest.raises(ConfigurationError) as exc:
+ load_from_filename('tests/fixtures/extends/service-with-invalid-schema.yml')
+ assert (
+ "myweb has neither an image nor a build context specified" in
+ exc.exconly()
+ )
+
+ def test_extended_service_with_valid_config(self):
+ service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml')
+ self.assertEqual(service[0]['command'], "top")
+
+ def test_extends_file_defaults_to_self(self):
+ """
+        Test that, when no file is specified in the extends options, the
+        config is valid and correctly extends from itself.
+ """
+ service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml')
+ self.assertEqual(service_sort(service_dicts), service_sort([
+ {
+ 'name': 'myweb',
+ 'image': 'busybox',
+ 'environment': {
+ "BAR": "1",
+ "BAZ": "3",
+ }
+ },
+ {
+ 'name': 'web',
+ 'image': 'busybox',
+ 'environment': {
+ "BAZ": "3",
+ }
+ }
+ ]))
+
+ def test_invalid_links_in_extended_service(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-links.yml')
+
+ assert "services with 'links' cannot be extended" in excinfo.exconly()
+
+ def test_invalid_volumes_from_in_extended_service(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-volumes.yml')
+
+ assert "services with 'volumes_from' cannot be extended" in excinfo.exconly()
+
+ def test_invalid_net_in_extended_service(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-net-v2.yml')
+
+ assert 'network_mode: service' in excinfo.exconly()
+ assert 'cannot be extended' in excinfo.exconly()
+
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/invalid-net.yml')
+
+ assert 'net: container' in excinfo.exconly()
+ assert 'cannot be extended' in excinfo.exconly()
+
+ @mock.patch.dict(os.environ)
+ def test_load_config_runs_interpolation_in_extended_service(self):
+ os.environ.update(HOSTNAME_VALUE="penguin")
+ expected_interpolated_value = "host-penguin"
+ service_dicts = load_from_filename(
+ 'tests/fixtures/extends/valid-interpolation.yml')
+ for service in service_dicts:
+ assert service['hostname'] == expected_interpolated_value
+
+ @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+ def test_volume_path(self):
+ dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml')
+
+ paths = [
+ VolumeSpec(
+ os.path.abspath('tests/fixtures/volume-path/common/foo'),
+ '/foo',
+ 'rw'),
+ VolumeSpec(
+ os.path.abspath('tests/fixtures/volume-path/bar'),
+ '/bar',
+ 'rw')
+ ]
+
+ self.assertEqual(set(dicts[0]['volumes']), set(paths))
+
+ def test_parent_build_path_dne(self):
+ child = load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml')
+
+ self.assertEqual(child, [
+ {
+ 'name': 'dnechild',
+ 'image': 'busybox',
+ 'command': '/bin/true',
+ 'environment': {
+ "FOO": "1",
+ "BAR": "2",
+ },
+ },
+ ])
+
+ def test_load_throws_error_when_base_service_does_not_exist(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ load_from_filename('tests/fixtures/extends/nonexistent-service.yml')
+
+ assert "Cannot extend service 'foo'" in excinfo.exconly()
+ assert "Service not found" in excinfo.exconly()
+
+ def test_partial_service_config_in_extends_is_still_valid(self):
+ dicts = load_from_filename('tests/fixtures/extends/valid-common-config.yml')
+ self.assertEqual(dicts[0]['environment'], {'FOO': '1'})
+
+ def test_extended_service_with_verbose_and_shorthand_way(self):
+ services = load_from_filename('tests/fixtures/extends/verbose-and-shorthand.yml')
+ self.assertEqual(service_sort(services), service_sort([
+ {
+ 'name': 'base',
+ 'image': 'busybox',
+ 'environment': {'BAR': '1'},
+ },
+ {
+ 'name': 'verbose',
+ 'image': 'busybox',
+ 'environment': {'BAR': '1', 'FOO': '1'},
+ },
+ {
+ 'name': 'shorthand',
+ 'image': 'busybox',
+ 'environment': {'BAR': '1', 'FOO': '2'},
+ },
+ ]))
+
+ @mock.patch.dict(os.environ)
+ def test_extends_with_environment_and_env_files(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_environment')
+ self.addCleanup(tmpdir.remove)
+ commondir = tmpdir.mkdir('common')
+ commondir.join('base.yml').write("""
+ app:
+ image: 'example/app'
+ env_file:
+ - 'envs'
+ environment:
+ - SECRET
+ - TEST_ONE=common
+ - TEST_TWO=common
+ """)
+ tmpdir.join('docker-compose.yml').write("""
+ ext:
+ extends:
+ file: common/base.yml
+ service: app
+ env_file:
+ - 'envs'
+ environment:
+ - THING
+ - TEST_ONE=top
+ """)
+ commondir.join('envs').write("""
+ COMMON_ENV_FILE
+ TEST_ONE=common-env-file
+ TEST_TWO=common-env-file
+ TEST_THREE=common-env-file
+ TEST_FOUR=common-env-file
+ """)
+ tmpdir.join('envs').write("""
+ TOP_ENV_FILE
+ TEST_ONE=top-env-file
+ TEST_TWO=top-env-file
+ TEST_THREE=top-env-file
+ """)
+
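+        # Expected precedence: 'environment' entries beat env_file values,
+        # with the extending service winning over the base on both; bare
+        # names (SECRET, THING, *_ENV_FILE) resolve from os.environ.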
+ expected = [
+ {
+ 'name': 'ext',
+ 'image': 'example/app',
+ 'environment': {
+ 'SECRET': 'secret',
+ 'TOP_ENV_FILE': 'secret',
+ 'COMMON_ENV_FILE': 'secret',
+ 'THING': 'thing',
+ 'TEST_ONE': 'top',
+ 'TEST_TWO': 'common',
+ 'TEST_THREE': 'top-env-file',
+ 'TEST_FOUR': 'common-env-file',
+ },
+ },
+ ]
+
+ os.environ['SECRET'] = 'secret'
+ os.environ['THING'] = 'thing'
+ os.environ['COMMON_ENV_FILE'] = 'secret'
+ os.environ['TOP_ENV_FILE'] = 'secret'
+ config = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+
+ assert config == expected
+
+ def test_extends_with_mixed_versions_is_error(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_mixed_version')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: "2"
+ services:
+ web:
+ extends:
+ file: base.yml
+ service: base
+ image: busybox
+ """)
+ tmpdir.join('base.yml').write("""
+ base:
+ volumes: ['/foo']
+ ports: ['3000:3000']
+ """)
+
+ with pytest.raises(ConfigurationError) as exc:
+ load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ assert 'Version mismatch' in exc.exconly()
+
+ def test_extends_with_defined_version_passes(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_defined_version')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: "2"
+ services:
+ web:
+ extends:
+ file: base.yml
+ service: base
+ image: busybox
+ """)
+ tmpdir.join('base.yml').write("""
+ version: "2"
+ services:
+ base:
+ volumes: ['/foo']
+ ports: ['3000:3000']
+ command: top
+ """)
+
+ service = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ self.assertEqual(service[0]['command'], "top")
+
+ def test_extends_with_depends_on(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_depends_on')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: "2"
+ services:
+ base:
+ image: example
+ web:
+ extends: base
+ image: busybox
+ depends_on: ['other']
+ other:
+ image: example
+ """)
+ services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ assert service_sort(services)[2]['depends_on'] == {
+ 'other': {'condition': 'service_started'}
+ }
+
+ def test_extends_with_healthcheck(self):
+ service_dicts = load_from_filename('tests/fixtures/extends/healthcheck-2.yml')
+ assert service_sort(service_dicts) == [{
+ 'name': 'demo',
+ 'image': 'foobar:latest',
+ 'healthcheck': {
+ 'test': ['CMD', '/health.sh'],
+ 'interval': 10000000000,
+ 'timeout': 5000000000,
+ 'retries': 36,
+ }
+ }]
+
+ def test_extends_with_ports(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_ports')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: '2'
+
+ services:
+ a:
+ image: nginx
+ ports:
+ - 80
+
+ b:
+ extends:
+ service: a
+ """)
+ services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+
+ assert len(services) == 2
+ for svc in services:
+ assert svc['ports'] == [types.ServicePort('80', None, None, None, None)]
+
+
+@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+class ExpandPathTest(unittest.TestCase):
+ working_dir = '/home/user/somedir'
+
+ def test_expand_path_normal(self):
+ result = config.expand_path(self.working_dir, 'myfile')
+ self.assertEqual(result, self.working_dir + '/' + 'myfile')
+
+ def test_expand_path_absolute(self):
+ abs_path = '/home/user/otherdir/somefile'
+ result = config.expand_path(self.working_dir, abs_path)
+ self.assertEqual(result, abs_path)
+
+ def test_expand_path_with_tilde(self):
+ test_path = '~/otherdir/somefile'
+ with mock.patch.dict(os.environ):
+ os.environ['HOME'] = user_path = '/home/user/'
+ result = config.expand_path(self.working_dir, test_path)
+
+ self.assertEqual(result, user_path + 'otherdir/somefile')
+
+
+class VolumePathTest(unittest.TestCase):
+
+ def test_split_path_mapping_with_windows_path(self):
+ host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config"
+ windows_volume_path = host_path + ":/opt/connect/config:ro"
+ expected_mapping = ("/opt/connect/config", (host_path, 'ro'))
+
+ mapping = config.split_path_mapping(windows_volume_path)
+ assert mapping == expected_mapping
+
+ def test_split_path_mapping_with_windows_path_in_container(self):
+ host_path = 'c:\\Users\\remilia\\data'
+ container_path = 'c:\\scarletdevil\\data'
+ expected_mapping = (container_path, (host_path, None))
+
+ mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
+ assert mapping == expected_mapping
+
+ def test_split_path_mapping_with_root_mount(self):
+ host_path = '/'
+ container_path = '/var/hostroot'
+ expected_mapping = (container_path, (host_path, None))
+ mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
+ assert mapping == expected_mapping
+
+
+@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
+class BuildPathTest(unittest.TestCase):
+
+ def setUp(self):
+ self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx')
+
+ def test_nonexistent_path(self):
+ with self.assertRaises(ConfigurationError):
+ config.load(
+ build_config_details(
+ {
+ 'foo': {'build': 'nonexistent.path'},
+ },
+ 'working_dir',
+ 'filename.yml'
+ )
+ )
+
+ def test_relative_path(self):
+ relative_build_path = '../build-ctx/'
+ service_dict = make_service_dict(
+ 'relpath',
+ {'build': relative_build_path},
+ working_dir='tests/fixtures/build-path'
+ )
+ self.assertEqual(service_dict['build'], self.abs_context_path)
+
+ def test_absolute_path(self):
+ service_dict = make_service_dict(
+ 'abspath',
+ {'build': self.abs_context_path},
+ working_dir='tests/fixtures/build-path'
+ )
+ self.assertEqual(service_dict['build'], self.abs_context_path)
+
+ def test_from_file(self):
+ service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml')
+ self.assertEqual(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+
+ def test_from_file_override_dir(self):
+ override_dir = os.path.join(os.getcwd(), 'tests/fixtures/')
+ service_dict = load_from_filename(
+ 'tests/fixtures/build-path-override-dir/docker-compose.yml', override_dir=override_dir)
+        self.assertEqual(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+
+ def test_valid_url_in_build_path(self):
+ valid_urls = [
+ 'git://github.com/docker/docker',
+ 'git@github.com:docker/docker.git',
+ 'git@bitbucket.org:atlassianlabs/atlassian-docker.git',
+ 'https://github.com/docker/docker.git',
+ 'http://github.com/docker/docker.git',
+ 'github.com/docker/docker.git',
+ ]
+ for valid_url in valid_urls:
+ service_dict = config.load(build_config_details({
+ 'validurl': {'build': valid_url},
+ }, '.', None)).services
+ assert service_dict[0]['build'] == {'context': valid_url}
+
+ def test_invalid_url_in_build_path(self):
+ invalid_urls = [
+ 'example.com/bogus',
+ 'ftp://example.com/',
+ '/path/does/not/exist',
+ ]
+ for invalid_url in invalid_urls:
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(build_config_details({
+ 'invalidurl': {'build': invalid_url},
+ }, '.', None))
+ assert 'build path' in exc.exconly()
+
+
+class HealthcheckTest(unittest.TestCase):
+ def test_healthcheck(self):
+ service_dict = make_service_dict(
+ 'test',
+ {'healthcheck': {
+ 'test': ['CMD', 'true'],
+ 'interval': '1s',
+ 'timeout': '1m',
+ 'retries': 3,
+ 'start_period': '10s'
+ }},
+ '.',
+ )
+
+ assert service_dict['healthcheck'] == {
+ 'test': ['CMD', 'true'],
+ 'interval': nanoseconds_from_time_seconds(1),
+ 'timeout': nanoseconds_from_time_seconds(60),
+ 'retries': 3,
+ 'start_period': nanoseconds_from_time_seconds(10)
+ }
+
+ def test_disable(self):
+ service_dict = make_service_dict(
+ 'test',
+ {'healthcheck': {
+ 'disable': True,
+ }},
+ '.',
+ )
+
+ assert service_dict['healthcheck'] == {
+ 'test': ['NONE'],
+ }
+
+ def test_disable_with_other_config_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ make_service_dict(
+ 'invalid-healthcheck',
+ {'healthcheck': {
+ 'disable': True,
+ 'interval': '1s',
+ }},
+ '.',
+ )
+
+ assert 'invalid-healthcheck' in excinfo.exconly()
+ assert 'disable' in excinfo.exconly()
+
+
+class GetDefaultConfigFilesTestCase(unittest.TestCase):
+
+ files = [
+ 'docker-compose.yml',
+ 'docker-compose.yaml',
+ ]
+
+ def test_get_config_path_default_file_in_basedir(self):
+ for index, filename in enumerate(self.files):
+ self.assertEqual(
+ filename,
+ get_config_filename_for_files(self.files[index:]))
+ with self.assertRaises(config.ComposeFileNotFound):
+ get_config_filename_for_files([])
+
+ def test_get_config_path_default_file_in_parent_dir(self):
+ """Test with files placed in the subdir"""
+
+ def get_config_in_subdir(files):
+ return get_config_filename_for_files(files, subdir=True)
+
+ for index, filename in enumerate(self.files):
+ self.assertEqual(filename, get_config_in_subdir(self.files[index:]))
+ with self.assertRaises(config.ComposeFileNotFound):
+ get_config_in_subdir([])
+
+
+def get_config_filename_for_files(filenames, subdir=None):
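+    """Create the given files in a temp project dir and return the basename
+    selected by config.get_default_config_files. With subdir set, lookup
+    starts from a nested directory, exercising the upward search.
+    """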
+ def make_files(dirname, filenames):
+ for fname in filenames:
+ with open(os.path.join(dirname, fname), 'w') as f:
+ f.write('')
+
+ project_dir = tempfile.mkdtemp()
+ try:
+ make_files(project_dir, filenames)
+ if subdir:
+ base_dir = tempfile.mkdtemp(dir=project_dir)
+ else:
+ base_dir = project_dir
+ filename, = config.get_default_config_files(base_dir)
+ return os.path.basename(filename)
+ finally:
+ shutil.rmtree(project_dir)
+
+
+class SerializeTest(unittest.TestCase):
+ def test_denormalize_depends_on_v3(self):
+ service_dict = {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': {
+ 'service2': {'condition': 'service_started'},
+ 'service3': {'condition': 'service_started'},
+ }
+ }
+
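+        # The v3 schema only accepts the list form of depends_on, so the
+        # condition mapping is flattened back to a list of service names.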
+ assert denormalize_service_dict(service_dict, V3_0) == {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': ['service2', 'service3']
+ }
+
+ def test_denormalize_depends_on_v2_1(self):
+ service_dict = {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': {
+ 'service2': {'condition': 'service_started'},
+ 'service3': {'condition': 'service_started'},
+ }
+ }
+
+ assert denormalize_service_dict(service_dict, V2_1) == service_dict
+
+ def test_serialize_time(self):
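+        # serialize_ns_time_value is expected to render the largest unit
+        # that divides the value evenly (us, ms, s, m, h) and fall back to
+        # raw nanoseconds otherwise, e.g. 999999999 and 60000000001 below.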
+ data = {
+ 9: '9ns',
+ 9000: '9us',
+ 9000000: '9ms',
+ 90000000: '90ms',
+ 900000000: '900ms',
+ 999999999: '999999999ns',
+ 1000000000: '1s',
+ 60000000000: '1m',
+ 60000000001: '60000000001ns',
+ 9000000000000: '150m',
+ 90000000000000: '25h',
+ }
+
+ for k, v in data.items():
+ assert serialize_ns_time_value(k) == v
+
+ def test_denormalize_healthcheck(self):
+ service_dict = {
+ 'image': 'test',
+ 'healthcheck': {
+ 'test': 'exit 1',
+ 'interval': '1m40s',
+ 'timeout': '30s',
+ 'retries': 5,
+ 'start_period': '2s90ms'
+ }
+ }
+ processed_service = config.process_service(config.ServiceConfig(
+ '.', 'test', 'test', service_dict
+ ))
+ denormalized_service = denormalize_service_dict(processed_service, V2_3)
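+        # Durations are normalized to nanoseconds on load, then serialized
+        # back as the largest evenly-dividing unit: '1m40s' -> '100s',
+        # '2s90ms' -> '2090ms'.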
+ assert denormalized_service['healthcheck']['interval'] == '100s'
+ assert denormalized_service['healthcheck']['timeout'] == '30s'
+ assert denormalized_service['healthcheck']['start_period'] == '2090ms'
+
+ def test_denormalize_image_has_digest(self):
+ service_dict = {
+ 'image': 'busybox'
+ }
+ image_digest = 'busybox@sha256:abcde'
+
+ assert denormalize_service_dict(service_dict, V3_0, image_digest) == {
+ 'image': 'busybox@sha256:abcde'
+ }
+
+ def test_denormalize_image_no_digest(self):
+ service_dict = {
+ 'image': 'busybox'
+ }
+
+ assert denormalize_service_dict(service_dict, V3_0) == {
+ 'image': 'busybox'
+ }
+
+ def test_serialize_secrets(self):
+ service_dict = {
+ 'image': 'example/web',
+ 'secrets': [
+ {'source': 'one'},
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ }
+ ]
+ }
+ secrets_dict = {
+ 'one': {'file': '/one.txt'},
+ 'source': {'file': '/source.pem'},
+ 'two': {'external': True},
+ }
+ config_dict = config.load(build_config_details({
+ 'version': '3.1',
+ 'services': {'web': service_dict},
+ 'secrets': secrets_dict
+ }))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets'])
+ assert 'secrets' in serialized_config
+ assert serialized_config['secrets']['two'] == secrets_dict['two']
+
+ def test_serialize_ports(self):
+ config_dict = config.Config(version=V2_0, services=[
+ {
+ 'ports': [types.ServicePort('80', '8080', None, None, None)],
+ 'image': 'alpine',
+ 'name': 'web'
+ }
+ ], volumes={}, networks={}, secrets={}, configs={})
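+        # In v2 output, a ServicePort is rendered back to the legacy
+        # 'published:target/protocol' string form.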
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ assert '8080:80/tcp' in serialized_config['services']['web']['ports']
+
+ def test_serialize_configs(self):
+ service_dict = {
+ 'image': 'example/web',
+ 'configs': [
+ {'source': 'one'},
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ }
+ ]
+ }
+ configs_dict = {
+ 'one': {'file': '/one.txt'},
+ 'source': {'file': '/source.pem'},
+ 'two': {'external': True},
+ }
+ config_dict = config.load(build_config_details({
+ 'version': '3.3',
+ 'services': {'web': service_dict},
+ 'configs': configs_dict
+ }))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert secret_sort(serialized_service['configs']) == secret_sort(service_dict['configs'])
+ assert 'configs' in serialized_config
+ assert serialized_config['configs']['two'] == configs_dict['two']
+
+ def test_serialize_bool_string(self):
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'command': 'true',
+ 'environment': {'FOO': 'Y', 'BAR': 'on'}
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg))
+
+ serialized_config = serialize_config(config_dict)
+ assert 'command: "true"\n' in serialized_config
+ assert 'FOO: "Y"\n' in serialized_config
+ assert 'BAR: "on"\n' in serialized_config
+
+ def test_serialize_escape_dollar_sign(self):
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': 'echo $$FOO',
+ 'environment': {
+ 'CURRENCY': '$$'
+ },
+ 'entrypoint': ['$$SHELL', '-c'],
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg))
+
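+        # '$$' is the compose escape for a literal '$'; serialization must
+        # keep it escaped so the output file stays re-loadable.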
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert serialized_service['environment']['CURRENCY'] == '$$'
+ assert serialized_service['command'] == 'echo $$FOO'
+ assert serialized_service['entrypoint'][0] == '$$SHELL'
diff --git a/tests/unit/config/environment_test.py b/tests/unit/config/environment_test.py
new file mode 100644
index 00000000..20446d2b
--- /dev/null
+++ b/tests/unit/config/environment_test.py
@@ -0,0 +1,40 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from compose.config.environment import Environment
+from tests import unittest
+
+
+class EnvironmentTest(unittest.TestCase):
+ def test_get_simple(self):
+ env = Environment({
+ 'FOO': 'bar',
+ 'BAR': '1',
+ 'BAZ': ''
+ })
+
+ assert env.get('FOO') == 'bar'
+ assert env.get('BAR') == '1'
+ assert env.get('BAZ') == ''
+
+ def test_get_undefined(self):
+ env = Environment({
+ 'FOO': 'bar'
+ })
+ assert env.get('FOOBAR') is None
+
+ def test_get_boolean(self):
+ env = Environment({
+ 'FOO': '',
+ 'BAR': '0',
+ 'BAZ': 'FALSE',
+ 'FOOBAR': 'true',
+ })
+
+ assert env.get_boolean('FOO') is False
+ assert env.get_boolean('BAR') is False
+ assert env.get_boolean('BAZ') is False
+ assert env.get_boolean('FOOBAR') is True
+ assert env.get_boolean('UNDEFINED') is False
diff --git a/tests/unit/config/interpolation_test.py b/tests/unit/config/interpolation_test.py
new file mode 100644
index 00000000..018a5621
--- /dev/null
+++ b/tests/unit/config/interpolation_test.py
@@ -0,0 +1,148 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from compose.config.environment import Environment
+from compose.config.interpolation import interpolate_environment_variables
+from compose.config.interpolation import Interpolator
+from compose.config.interpolation import InvalidInterpolation
+from compose.config.interpolation import TemplateWithDefaults
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+
+
+@pytest.fixture
+def mock_env():
+ return Environment({'USER': 'jenny', 'FOO': 'bar'})
+
+
+@pytest.fixture
+def variable_mapping():
+ return Environment({'FOO': 'first', 'BAR': ''})
+
+
+@pytest.fixture
+def defaults_interpolator(variable_mapping):
+ return Interpolator(TemplateWithDefaults, variable_mapping).interpolate
+
+
+def test_interpolate_environment_variables_in_services(mock_env):
+ services = {
+ 'servicea': {
+ 'image': 'example:${USER}',
+ 'volumes': ['$FOO:/target'],
+ 'logging': {
+ 'driver': '${FOO}',
+ 'options': {
+ 'user': '$USER',
+ }
+ }
+ }
+ }
+ expected = {
+ 'servicea': {
+ 'image': 'example:jenny',
+ 'volumes': ['bar:/target'],
+ 'logging': {
+ 'driver': 'bar',
+ 'options': {
+ 'user': 'jenny',
+ }
+ }
+ }
+ }
+ value = interpolate_environment_variables(V2_0, services, 'service', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_variables_in_volumes(mock_env):
+ volumes = {
+ 'data': {
+ 'driver': '$FOO',
+ 'driver_opts': {
+ 'max': 2,
+ 'user': '${USER}'
+ }
+ },
+ 'other': None,
+ }
+ expected = {
+ 'data': {
+ 'driver': 'bar',
+ 'driver_opts': {
+ 'max': 2,
+ 'user': 'jenny'
+ }
+ },
+ 'other': {},
+ }
+ value = interpolate_environment_variables(V2_0, volumes, 'volume', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_variables_in_secrets(mock_env):
+ secrets = {
+ 'secretservice': {
+ 'file': '$FOO',
+ 'labels': {
+ 'max': 2,
+ 'user': '${USER}'
+ }
+ },
+ 'other': None,
+ }
+ expected = {
+ 'secretservice': {
+ 'file': 'bar',
+ 'labels': {
+ 'max': 2,
+ 'user': 'jenny'
+ }
+ },
+ 'other': {},
+ }
+ value = interpolate_environment_variables(V3_1, secrets, 'volume', mock_env)
+ assert value == expected
+
+
+def test_escaped_interpolation(defaults_interpolator):
+ assert defaults_interpolator('$${foo}') == '${foo}'
+
+
+def test_invalid_interpolation(defaults_interpolator):
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('$}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${ }')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${ foo}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${foo }')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${foo!}')
+
+
+def test_interpolate_missing_no_default(defaults_interpolator):
+ assert defaults_interpolator("This ${missing} var") == "This var"
+ assert defaults_interpolator("This ${BAR} var") == "This var"
+
+
+def test_interpolate_with_value(defaults_interpolator):
+ assert defaults_interpolator("This $FOO var") == "This first var"
+ assert defaults_interpolator("This ${FOO} var") == "This first var"
+
+
+def test_interpolate_missing_with_default(defaults_interpolator):
+ assert defaults_interpolator("ok ${missing:-def}") == "ok def"
+ assert defaults_interpolator("ok ${missing-def}") == "ok def"
+ assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
+
+
+def test_interpolate_with_empty_and_default_value(defaults_interpolator):
+ assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
+ assert defaults_interpolator("ok ${BAR-def}") == "ok "
diff --git a/tests/unit/config/sort_services_test.py b/tests/unit/config/sort_services_test.py
new file mode 100644
index 00000000..c39ac022
--- /dev/null
+++ b/tests/unit/config/sort_services_test.py
@@ -0,0 +1,243 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from compose.config.errors import DependencyError
+from compose.config.sort_services import sort_service_dicts
+from compose.config.types import VolumeFromSpec
+
+
+class TestSortService(object):
+ def test_sort_service_dicts_1(self):
+ services = [
+ {
+ 'links': ['redis'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'grunt'
+ },
+ {
+ 'name': 'redis'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'grunt'
+ assert sorted_services[1]['name'] == 'redis'
+ assert sorted_services[2]['name'] == 'web'
+
+ def test_sort_service_dicts_2(self):
+ services = [
+ {
+ 'links': ['redis', 'postgres'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'postgres',
+ 'links': ['redis']
+ },
+ {
+ 'name': 'redis'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'redis'
+ assert sorted_services[1]['name'] == 'postgres'
+ assert sorted_services[2]['name'] == 'web'
+
+ def test_sort_service_dicts_3(self):
+ services = [
+ {
+ 'name': 'child'
+ },
+ {
+ 'name': 'parent',
+ 'links': ['child']
+ },
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_4(self):
+ services = [
+ {
+ 'name': 'child'
+ },
+ {
+ 'name': 'parent',
+ 'volumes_from': [VolumeFromSpec('child', 'rw', 'service')]
+ },
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_5(self):
+ services = [
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ {
+ 'name': 'parent',
+ 'network_mode': 'service:child'
+ },
+ {
+ 'name': 'child'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_6(self):
+ services = [
+ {
+ 'links': ['parent'],
+ 'name': 'grandparent'
+ },
+ {
+ 'name': 'parent',
+ 'volumes_from': [VolumeFromSpec('child', 'ro', 'service')]
+ },
+ {
+ 'name': 'child'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 3
+ assert sorted_services[0]['name'] == 'child'
+ assert sorted_services[1]['name'] == 'parent'
+ assert sorted_services[2]['name'] == 'grandparent'
+
+ def test_sort_service_dicts_7(self):
+ services = [
+ {
+ 'network_mode': 'service:three',
+ 'name': 'four'
+ },
+ {
+ 'links': ['two'],
+ 'name': 'three'
+ },
+ {
+ 'name': 'two',
+ 'volumes_from': [VolumeFromSpec('one', 'rw', 'service')]
+ },
+ {
+ 'name': 'one'
+ }
+ ]
+
+ sorted_services = sort_service_dicts(services)
+ assert len(sorted_services) == 4
+ assert sorted_services[0]['name'] == 'one'
+ assert sorted_services[1]['name'] == 'two'
+ assert sorted_services[2]['name'] == 'three'
+ assert sorted_services[3]['name'] == 'four'
+
+ def test_sort_service_dicts_circular_imports(self):
+ services = [
+ {
+ 'links': ['redis'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'redis',
+ 'links': ['web']
+ },
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'redis' in exc.exconly()
+ assert 'web' in exc.exconly()
+
+ def test_sort_service_dicts_circular_imports_2(self):
+ services = [
+ {
+ 'links': ['postgres', 'redis'],
+ 'name': 'web'
+ },
+ {
+ 'name': 'redis',
+ 'links': ['web']
+ },
+ {
+ 'name': 'postgres'
+ }
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'redis' in exc.exconly()
+ assert 'web' in exc.exconly()
+
+ def test_sort_service_dicts_circular_imports_3(self):
+ services = [
+ {
+ 'links': ['b'],
+ 'name': 'a'
+ },
+ {
+ 'name': 'b',
+ 'links': ['c']
+ },
+ {
+ 'name': 'c',
+ 'links': ['a']
+ }
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'a' in exc.exconly()
+ assert 'b' in exc.exconly()
+
+ def test_sort_service_dicts_self_imports(self):
+ services = [
+ {
+ 'links': ['web'],
+ 'name': 'web'
+ },
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'web' in exc.exconly()
+
+ def test_sort_service_dicts_depends_on_self(self):
+ services = [
+ {
+ 'depends_on': ['web'],
+ 'name': 'web'
+ },
+ ]
+
+ with pytest.raises(DependencyError) as exc:
+ sort_service_dicts(services)
+ assert 'A service can not depend on itself: web' in exc.exconly()
diff --git a/tests/unit/config/types_test.py b/tests/unit/config/types_test.py
new file mode 100644
index 00000000..3a43f727
--- /dev/null
+++ b/tests/unit/config/types_test.py
@@ -0,0 +1,235 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from compose.config.errors import ConfigurationError
+from compose.config.types import parse_extra_hosts
+from compose.config.types import ServicePort
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
+def test_parse_extra_hosts_list():
+ expected = {'www.example.com': '192.168.0.17'}
+ assert parse_extra_hosts(["www.example.com:192.168.0.17"]) == expected
+
+ expected = {'www.example.com': '192.168.0.17'}
+ assert parse_extra_hosts(["www.example.com: 192.168.0.17"]) == expected
+
+ assert parse_extra_hosts([
+ "www.example.com: 192.168.0.17",
+ "static.example.com:192.168.0.19",
+ "api.example.com: 192.168.0.18",
+ "v6.example.com: ::1"
+ ]) == {
+ 'www.example.com': '192.168.0.17',
+ 'static.example.com': '192.168.0.19',
+ 'api.example.com': '192.168.0.18',
+ 'v6.example.com': '::1'
+ }
+
+
+def test_parse_extra_hosts_dict():
+ assert parse_extra_hosts({
+ 'www.example.com': '192.168.0.17',
+ 'api.example.com': '192.168.0.18'
+ }) == {
+ 'www.example.com': '192.168.0.17',
+ 'api.example.com': '192.168.0.18'
+ }
+
+
+class TestServicePort(object):
+ def test_parse_dict(self):
+ data = {
+ 'target': 8000,
+ 'published': 8000,
+ 'protocol': 'udp',
+ 'mode': 'global',
+ }
+ ports = ServicePort.parse(data)
+ assert len(ports) == 1
+ assert ports[0].repr() == data
+
+ def test_parse_simple_target_port(self):
+ ports = ServicePort.parse(8000)
+ assert len(ports) == 1
+ assert ports[0].target == 8000
+
+ def test_parse_complete_port_definition(self):
+ port_def = '1.1.1.1:3000:3000/udp'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].repr() == {
+ 'target': 3000,
+ 'published': 3000,
+ 'external_ip': '1.1.1.1',
+ 'protocol': 'udp',
+ }
+ assert ports[0].legacy_repr() == port_def
+
+ def test_parse_ext_ip_no_published_port(self):
+ port_def = '1.1.1.1::3000'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].legacy_repr() == port_def + '/tcp'
+ assert ports[0].repr() == {
+ 'target': 3000,
+ 'external_ip': '1.1.1.1',
+ }
+
+ def test_repr_published_port_0(self):
+ port_def = '0:4000'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].legacy_repr() == port_def + '/tcp'
+
+ def test_parse_port_range(self):
+ ports = ServicePort.parse('25000-25001:4000-4001')
+ assert len(ports) == 2
+ reprs = [p.repr() for p in ports]
+ assert {
+ 'target': 4000,
+ 'published': 25000
+ } in reprs
+ assert {
+ 'target': 4001,
+ 'published': 25001
+ } in reprs
+
+ def test_parse_invalid_port(self):
+ port_def = '4000p'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
+
+class TestVolumeSpec(object):
+
+ def test_parse_volume_spec_only_one_path(self):
+ spec = VolumeSpec.parse('/the/volume')
+ assert spec == (None, '/the/volume', 'rw')
+
+ def test_parse_volume_spec_internal_and_external(self):
+        spec = VolumeSpec.parse('external:internal')
+        assert spec == ('external', 'internal', 'rw')
+
+    def test_parse_volume_spec_with_mode(self):
+        spec = VolumeSpec.parse('external:internal:ro')
+        assert spec == ('external', 'internal', 'ro')
+
+        spec = VolumeSpec.parse('external:internal:z')
+        assert spec == ('external', 'internal', 'z')
+
+ def test_parse_volume_spec_too_many_parts(self):
+ with pytest.raises(ConfigurationError) as exc:
+ VolumeSpec.parse('one:two:three:four')
+ assert 'has incorrect format' in exc.exconly()
+
+ def test_parse_volume_windows_absolute_path_normalized(self):
+ windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro"
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ "/c/Users/me/Documents/shiny/config",
+ "/opt/shiny/config",
+ "ro"
+ )
+
+ def test_parse_volume_windows_absolute_path_native(self):
+ windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro"
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ "c:\\Users\\me\\Documents\\shiny\\config",
+ "/opt/shiny/config",
+ "ro"
+ )
+
+ def test_parse_volume_windows_internal_path_normalized(self):
+ windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/c/Users/reimu/scarlet',
+ 'C:\\scarlet\\app',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_internal_path_native(self):
+ windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'C:\\Users\\reimu\\scarlet',
+ 'C:\\scarlet\\app',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_just_drives_normalized(self):
+ windows_path = 'E:\\:C:\\:ro'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/e/',
+ 'C:\\',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_just_drives_native(self):
+ windows_path = 'E:\\:C:\\:ro'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'E:\\',
+ 'C:\\',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_mixed_notations_normalized(self):
+ windows_path = 'C:\\Foo:/root/foo'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/c/Foo',
+ '/root/foo',
+ 'rw'
+ )
+
+ def test_parse_volume_windows_mixed_notations_native(self):
+ windows_path = 'C:\\Foo:/root/foo'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'C:\\Foo',
+ '/root/foo',
+ 'rw'
+ )
+
+
+class TestVolumesFromSpec(object):
+
+ services = ['servicea', 'serviceb']
+
+ def test_parse_v1_from_service(self):
+ volume_from = VolumeFromSpec.parse('servicea', self.services, V1)
+ assert volume_from == VolumeFromSpec('servicea', 'rw', 'service')
+
+ def test_parse_v1_from_container(self):
+ volume_from = VolumeFromSpec.parse('foo:ro', self.services, V1)
+ assert volume_from == VolumeFromSpec('foo', 'ro', 'container')
+
+ def test_parse_v1_invalid(self):
+ with pytest.raises(ConfigurationError):
+ VolumeFromSpec.parse('unknown:format:ro', self.services, V1)
+
+ def test_parse_v2_from_service(self):
+ volume_from = VolumeFromSpec.parse('servicea', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('servicea', 'rw', 'service')
+
+ def test_parse_v2_from_service_with_mode(self):
+ volume_from = VolumeFromSpec.parse('servicea:ro', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('servicea', 'ro', 'service')
+
+ def test_parse_v2_from_container(self):
+ volume_from = VolumeFromSpec.parse('container:foo', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('foo', 'rw', 'container')
+
+ def test_parse_v2_from_container_with_mode(self):
+ volume_from = VolumeFromSpec.parse('container:foo:ro', self.services, V2_0)
+ assert volume_from == VolumeFromSpec('foo', 'ro', 'container')
+
+ def test_parse_v2_invalid_type(self):
+ with pytest.raises(ConfigurationError) as exc:
+ VolumeFromSpec.parse('bogus:foo:ro', self.services, V2_0)
+ assert "Unknown volumes_from type 'bogus'" in exc.exconly()
+
+ def test_parse_v2_invalid(self):
+ with pytest.raises(ConfigurationError):
+ VolumeFromSpec.parse('unknown:format:ro', self.services, V2_0)
diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py
new file mode 100644
index 00000000..04f43016
--- /dev/null
+++ b/tests/unit/container_test.py
@@ -0,0 +1,198 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+
+from .. import mock
+from .. import unittest
+from compose.container import Container
+from compose.container import get_container_name
+
+
+class ContainerTest(unittest.TestCase):
+
+ def setUp(self):
+ self.container_id = "abcabcabcbabc12345"
+ self.container_dict = {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Command": "top",
+ "Created": 1387384730,
+ "Status": "Up 8 seconds",
+ "Ports": None,
+ "SizeRw": 0,
+ "SizeRootFs": 0,
+ "Names": ["/composetest_db_1", "/composetest_web_1/db"],
+ "NetworkSettings": {
+ "Ports": {},
+ },
+ "Config": {
+ "Labels": {
+ "com.docker.compose.project": "composetest",
+ "com.docker.compose.service": "web",
+ "com.docker.compose.container-number": 7,
+ },
+ }
+ }
+
+ def test_from_ps(self):
+ container = Container.from_ps(None,
+ self.container_dict,
+ has_been_inspected=True)
+ self.assertEqual(
+ container.dictionary,
+ {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Name": "/composetest_db_1",
+ })
+
+ def test_from_ps_prefixed(self):
+ self.container_dict['Names'] = [
+ '/swarm-host-1' + n for n in self.container_dict['Names']
+ ]
+
+ container = Container.from_ps(
+ None,
+ self.container_dict,
+ has_been_inspected=True)
+ self.assertEqual(container.dictionary, {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Name": "/composetest_db_1",
+ })
+
+ def test_environment(self):
+ container = Container(None, {
+ 'Id': 'abc',
+ 'Config': {
+ 'Env': [
+ 'FOO=BAR',
+ 'BAZ=DOGE',
+ ]
+ }
+ }, has_been_inspected=True)
+ self.assertEqual(container.environment, {
+ 'FOO': 'BAR',
+ 'BAZ': 'DOGE',
+ })
+
+ def test_number(self):
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.number, 7)
+
+ def test_name(self):
+ container = Container.from_ps(None,
+ self.container_dict,
+ has_been_inspected=True)
+ self.assertEqual(container.name, "composetest_db_1")
+
+ def test_name_without_project(self):
+ self.container_dict['Name'] = "/composetest_web_7"
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.name_without_project, "web_7")
+
+ def test_name_without_project_custom_container_name(self):
+ self.container_dict['Name'] = "/custom_name_of_container"
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.name_without_project, "custom_name_of_container")
+
+ def test_inspect_if_not_inspected(self):
+ mock_client = mock.create_autospec(docker.APIClient)
+ container = Container(mock_client, dict(Id="the_id"))
+
+ container.inspect_if_not_inspected()
+ mock_client.inspect_container.assert_called_once_with("the_id")
+ self.assertEqual(container.dictionary,
+ mock_client.inspect_container.return_value)
+ self.assertTrue(container.has_been_inspected)
+
+ container.inspect_if_not_inspected()
+ self.assertEqual(mock_client.inspect_container.call_count, 1)
+
+ def test_human_readable_ports_none(self):
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ self.assertEqual(container.human_readable_ports, '')
+
+ def test_human_readable_ports_public_and_private(self):
+ self.container_dict['NetworkSettings']['Ports'].update({
+ "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
+ "45453/tcp": [],
+ })
+ container = Container(None, self.container_dict, has_been_inspected=True)
+
+ expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
+ self.assertEqual(container.human_readable_ports, expected)
+
+ def test_get_local_port(self):
+ self.container_dict['NetworkSettings']['Ports'].update({
+ "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
+ })
+ container = Container(None, self.container_dict, has_been_inspected=True)
+
+ self.assertEqual(
+ container.get_local_port(45454, protocol='tcp'),
+ '0.0.0.0:49197')
+
+ def test_get(self):
+ container = Container(None, {
+ "Status": "Up 8 seconds",
+ "HostConfig": {
+ "VolumesFrom": ["volume_id"]
+ },
+ }, has_been_inspected=True)
+
+ self.assertEqual(container.get('Status'), "Up 8 seconds")
+ self.assertEqual(container.get('HostConfig.VolumesFrom'), ["volume_id"])
+ self.assertEqual(container.get('Foo.Bar.DoesNotExist'), None)
+
+ def test_short_id(self):
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ assert container.short_id == self.container_id[:12]
+
+ def test_has_api_logs(self):
+ container_dict = {
+ 'HostConfig': {
+ 'LogConfig': {
+ 'Type': 'json-file'
+ }
+ }
+ }
+
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is True
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'none'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'syslog'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'journald'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is True
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'foobar'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+
+class GetContainerNameTestCase(unittest.TestCase):
+
+ def test_get_container_name(self):
+ self.assertIsNone(get_container_name({}))
+ self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1')
+ self.assertEqual(
+ get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}),
+ 'myproject_db_1')
+ self.assertEqual(
+ get_container_name({
+ 'Names': [
+ '/swarm-host-1/myproject_db_1',
+ '/swarm-host-1/myproject_web_1/db'
+ ]
+ }),
+ 'myproject_db_1'
+ )
diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py
new file mode 100644
index 00000000..b27339af
--- /dev/null
+++ b/tests/unit/network_test.py
@@ -0,0 +1,161 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from .. import mock
+from .. import unittest
+from compose.network import check_remote_network_config
+from compose.network import Network
+from compose.network import NetworkConfigChangedError
+
+
+class NetworkTest(unittest.TestCase):
+ def test_check_remote_network_config_success(self):
+ options = {'com.docker.network.driver.foo': 'bar'}
+ ipam_config = {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.0.0.1/16', },
+ {
+ 'subnet': '156.0.0.1/25',
+ 'gateway': '156.0.0.1',
+ 'aux_addresses': ['11.0.0.1', '24.25.26.27'],
+ 'ip_range': '156.0.0.1-254'
+ }
+ ]
+ }
+ labels = {
+ 'com.project.tests.istest': 'true',
+ 'com.project.sound.track': 'way out of here',
+ }
+ remote_labels = labels.copy()
+ remote_labels.update({
+ 'com.docker.compose.project': 'compose_test',
+ 'com.docker.compose.network': 'net1',
+ })
+ net = Network(
+ None, 'compose_test', 'net1', 'bridge',
+ options, enable_ipv6=True, ipam=ipam_config,
+ labels=labels
+ )
+ check_remote_network_config(
+ {
+ 'Driver': 'bridge',
+ 'Options': options,
+ 'EnableIPv6': True,
+ 'Internal': False,
+ 'Attachable': True,
+ 'IPAM': {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '156.0.0.1/25',
+ 'Gateway': '156.0.0.1',
+ 'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
+ 'IPRange': '156.0.0.1-254'
+ }, {
+ 'Subnet': '172.0.0.1/16',
+ 'Gateway': '172.0.0.1'
+ }],
+ },
+ 'Labels': remote_labels
+ },
+ net
+ )
+
+ def test_check_remote_network_config_whitelist(self):
+ options = {'com.docker.network.driver.foo': 'bar'}
+ remote_options = {
+ 'com.docker.network.driver.overlay.vxlanid_list': '257',
+ 'com.docker.network.driver.foo': 'bar',
+ 'com.docker.network.windowsshim.hnsid': 'aac3fd4887daaec1e3b',
+ }
+ net = Network(
+ None, 'compose_test', 'net1', 'overlay',
+ options
+ )
+ check_remote_network_config(
+ {'Driver': 'overlay', 'Options': remote_options}, net
+ )
+
+ def test_check_remote_network_config_driver_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ with pytest.raises(NetworkConfigChangedError) as e:
+ check_remote_network_config(
+ {'Driver': 'bridge', 'Options': {}}, net
+ )
+
+ assert 'driver has changed' in str(e.value)
+
+ def test_check_remote_network_config_options_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ with pytest.raises(NetworkConfigChangedError) as e:
+ check_remote_network_config({'Driver': 'overlay', 'Options': {
+ 'com.docker.network.driver.foo': 'baz'
+ }}, net)
+
+ assert 'option "com.docker.network.driver.foo" has changed' in str(e.value)
+
+ def test_check_remote_network_config_null_remote(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ check_remote_network_config(
+ {'Driver': 'overlay', 'Options': None}, net
+ )
+
+ def test_check_remote_network_config_null_remote_ipam_options(self):
+ ipam_config = {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.0.0.1/16', },
+ {
+ 'subnet': '156.0.0.1/25',
+ 'gateway': '156.0.0.1',
+ 'aux_addresses': ['11.0.0.1', '24.25.26.27'],
+ 'ip_range': '156.0.0.1-254'
+ }
+ ]
+ }
+ net = Network(
+ None, 'compose_test', 'net1', 'bridge', ipam=ipam_config,
+ )
+
+ check_remote_network_config(
+ {
+ 'Driver': 'bridge',
+ 'Attachable': True,
+ 'IPAM': {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '156.0.0.1/25',
+ 'Gateway': '156.0.0.1',
+ 'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
+ 'IPRange': '156.0.0.1-254'
+ }, {
+ 'Subnet': '172.0.0.1/16',
+ 'Gateway': '172.0.0.1'
+ }],
+ 'Options': None
+ },
+ },
+ net
+ )
+
+ def test_check_remote_network_labels_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay', labels={
+ 'com.project.touhou.character': 'sakuya.izayoi'
+ })
+ remote = {
+ 'Driver': 'overlay',
+ 'Options': None,
+ 'Labels': {
+ 'com.docker.compose.network': 'net1',
+ 'com.docker.compose.project': 'compose_test',
+ 'com.project.touhou.character': 'marisa.kirisame',
+ }
+ }
+ with mock.patch('compose.network.log') as mock_log:
+ check_remote_network_config(remote, net)
+
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warn.mock_calls[0]
+ assert 'label "com.project.touhou.character" has changed' in args[0]
diff --git a/tests/unit/parallel_test.py b/tests/unit/parallel_test.py
new file mode 100644
index 00000000..3a60f01a
--- /dev/null
+++ b/tests/unit/parallel_test.py
@@ -0,0 +1,163 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from threading import Lock
+
+import six
+from docker.errors import APIError
+
+from compose.parallel import parallel_execute
+from compose.parallel import parallel_execute_iter
+from compose.parallel import ParallelStreamWriter
+from compose.parallel import UpstreamError
+
+
+web = 'web'
+db = 'db'
+data_volume = 'data_volume'
+cache = 'cache'
+
+objects = [web, db, data_volume, cache]
+
+deps = {
+ web: [db, cache],
+ db: [data_volume],
+ data_volume: [],
+ cache: [],
+}
+
+
+def get_deps(obj):
+ return [(dep, None) for dep in deps[obj]]
+
+
+def test_parallel_execute():
+ results, errors = parallel_execute(
+ objects=[1, 2, 3, 4, 5],
+ func=lambda x: x * 2,
+ get_name=six.text_type,
+ msg="Doubling",
+ )
+
+ assert sorted(results) == [2, 4, 6, 8, 10]
+ assert errors == {}
+
+
+def test_parallel_execute_with_limit():
+ limit = 1
+ tasks = 20
+ lock = Lock()
+
+ def f(obj):
+ locked = lock.acquire(False)
+ # we should always get the lock because we're the only thread running
+ assert locked
+ lock.release()
+ return None
+
+ results, errors = parallel_execute(
+ objects=list(range(tasks)),
+ func=f,
+ get_name=six.text_type,
+ msg="Testing",
+ limit=limit,
+ )
+
+ assert results == tasks * [None]
+ assert errors == {}
+
+
+def test_parallel_execute_with_deps():
+ log = []
+
+ def process(x):
+ log.append(x)
+
+ parallel_execute(
+ objects=objects,
+ func=process,
+ get_name=lambda obj: obj,
+ msg="Processing",
+ get_deps=get_deps,
+ )
+
+ assert sorted(log) == sorted(objects)
+
+ assert log.index(data_volume) < log.index(db)
+ assert log.index(db) < log.index(web)
+ assert log.index(cache) < log.index(web)
+
+
+def test_parallel_execute_with_upstream_errors():
+ log = []
+
+ def process(x):
+ if x is data_volume:
+ raise APIError(None, None, "Something went wrong")
+ log.append(x)
+
+ parallel_execute(
+ objects=objects,
+ func=process,
+ get_name=lambda obj: obj,
+ msg="Processing",
+ get_deps=get_deps,
+ )
+
+ assert log == [cache]
+
+ events = [
+ (obj, result, type(exception))
+ for obj, result, exception
+ in parallel_execute_iter(objects, process, get_deps, None)
+ ]
+
+ assert (cache, None, type(None)) in events
+ assert (data_volume, None, APIError) in events
+ assert (db, None, UpstreamError) in events
+ assert (web, None, UpstreamError) in events
+
+
+def test_parallel_execute_alignment(capsys):
+ results, errors = parallel_execute(
+ objects=["short", "a very long name"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Aligning",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ a, b = err.split('\n')[:2]
+ assert a.index('...') == b.index('...')
+
+
+def test_parallel_execute_ansi(capsys):
+ ParallelStreamWriter.set_noansi(value=False)
+ results, errors = parallel_execute(
+ objects=["something", "something more"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Control characters",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ assert "\x1b" in err
+
+
+def test_parallel_execute_noansi(capsys):
+ ParallelStreamWriter.set_noansi()
+ results, errors = parallel_execute(
+ objects=["something", "something more"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Control characters",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ assert "\x1b" not in err
diff --git a/tests/unit/progress_stream_test.py b/tests/unit/progress_stream_test.py
new file mode 100644
index 00000000..c0cb906d
--- /dev/null
+++ b/tests/unit/progress_stream_test.py
@@ -0,0 +1,87 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from six import StringIO
+
+from compose import progress_stream
+from tests import unittest
+
+
+class ProgressStreamTestCase(unittest.TestCase):
+ def test_stream_output(self):
+ output = [
+ b'{"status": "Downloading", "progressDetail": {"current": '
+ b'31019763, "start": 1413653874, "total": 62763875}, '
+ b'"progress": "..."}',
+ ]
+ events = progress_stream.stream_output(output, StringIO())
+ self.assertEqual(len(events), 1)
+
+ def test_stream_output_div_zero(self):
+ output = [
+ b'{"status": "Downloading", "progressDetail": {"current": '
+ b'0, "start": 1413653874, "total": 0}, '
+ b'"progress": "..."}',
+ ]
+ events = progress_stream.stream_output(output, StringIO())
+ self.assertEqual(len(events), 1)
+
+ def test_stream_output_null_total(self):
+ output = [
+ b'{"status": "Downloading", "progressDetail": {"current": '
+ b'0, "start": 1413653874, "total": null}, '
+ b'"progress": "..."}',
+ ]
+ events = progress_stream.stream_output(output, StringIO())
+ self.assertEqual(len(events), 1)
+
+ def test_stream_output_progress_event_tty(self):
+ events = [
+ b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
+ ]
+
+ class TTYStringIO(StringIO):
+ def isatty(self):
+ return True
+
+ output = TTYStringIO()
+ events = progress_stream.stream_output(events, output)
+ self.assertTrue(len(output.getvalue()) > 0)
+
+ def test_stream_output_progress_event_no_tty(self):
+ events = [
+ b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
+ ]
+ output = StringIO()
+
+ events = progress_stream.stream_output(events, output)
+ self.assertEqual(len(output.getvalue()), 0)
+
+ def test_stream_output_no_progress_event_no_tty(self):
+ events = [
+ b'{"status": "Pulling from library/xy", "id": "latest"}'
+ ]
+ output = StringIO()
+
+ events = progress_stream.stream_output(events, output)
+ self.assertTrue(len(output.getvalue()) > 0)
+
+
+def test_get_digest_from_push():
+ digest = "sha256:abcd"
+ events = [
+ {"status": "..."},
+ {"status": "..."},
+ {"progressDetail": {}, "aux": {"Digest": digest}},
+ ]
+ assert progress_stream.get_digest_from_push(events) == digest
+
+
+def test_get_digest_from_pull():
+ digest = "sha256:abcd"
+ events = [
+ {"status": "..."},
+ {"status": "..."},
+ {"status": "Digest: %s" % digest},
+ ]
+ assert progress_stream.get_digest_from_pull(events) == digest
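The two digest helpers are pinned down by these events: a push reports the digest in an 'aux' payload, while a pull emits it as a human-readable status line. A sketch of the pull-side extraction consistent with the test data (assumed event shape):

    # Sketch of the digest extraction verified above; assumes the pull stream
    # reports the digest as a 'Digest: <value>' status event.
    def get_digest_from_pull(events):
        for event in events:
            status = event.get('status', '')
            if status.startswith('Digest: '):
                return status[len('Digest: '):]
        return None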
diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py
new file mode 100644
index 00000000..e5f1a175
--- /dev/null
+++ b/tests/unit/project_test.py
@@ -0,0 +1,570 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import datetime
+
+import docker
+from docker.errors import NotFound
+
+from .. import mock
+from .. import unittest
+from compose.config.config import Config
+from compose.config.types import VolumeFromSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import LABEL_SERVICE
+from compose.container import Container
+from compose.project import Project
+from compose.service import ImageType
+from compose.service import Service
+
+
+class ProjectTest(unittest.TestCase):
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_from_config_v1(self):
+ config = Config(
+ version=V1,
+ services=[
+ {
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ },
+ {
+ 'name': 'db',
+ 'image': 'busybox:latest',
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config,
+ client=None,
+ )
+ self.assertEqual(len(project.services), 2)
+ self.assertEqual(project.get_service('web').name, 'web')
+ self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
+ self.assertEqual(project.get_service('db').name, 'db')
+ self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
+ self.assertFalse(project.networks.use_networking)
+
+ def test_from_config_v2(self):
+ config = Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ },
+ {
+ 'name': 'db',
+ 'image': 'busybox:latest',
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ )
+ project = Project.from_config('composetest', config, None)
+ self.assertEqual(len(project.services), 2)
+ self.assertTrue(project.networks.use_networking)
+
+ def test_get_service(self):
+ web = Service(
+ project='composetest',
+ name='web',
+ client=None,
+ image="busybox:latest",
+ )
+ project = Project('test', [web], None)
+ self.assertEqual(project.get_service('web'), web)
+
+ def test_get_services_returns_all_services_without_args(self):
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ )
+ console = Service(
+ project='composetest',
+ name='console',
+ image='foo',
+ )
+ project = Project('test', [web, console], None)
+ self.assertEqual(project.get_services(), [web, console])
+
+ def test_get_services_returns_listed_services_with_args(self):
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ )
+ console = Service(
+ project='composetest',
+ name='console',
+ image='foo',
+ )
+ project = Project('test', [web, console], None)
+ self.assertEqual(project.get_services(['console']), [console])
+
+ def test_get_services_with_include_links(self):
+ db = Service(
+ project='composetest',
+ name='db',
+ image='foo',
+ )
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ links=[(db, 'database')]
+ )
+ cache = Service(
+ project='composetest',
+ name='cache',
+ image='foo'
+ )
+ console = Service(
+ project='composetest',
+ name='console',
+ image='foo',
+ links=[(web, 'web')]
+ )
+ project = Project('test', [web, db, cache, console], None)
+ self.assertEqual(
+ project.get_services(['console'], include_deps=True),
+ [db, web, console]
+ )
+
+ def test_get_services_removes_duplicates_following_links(self):
+ db = Service(
+ project='composetest',
+ name='db',
+ image='foo',
+ )
+ web = Service(
+ project='composetest',
+ name='web',
+ image='foo',
+ links=[(db, 'database')]
+ )
+ project = Project('test', [web, db], None)
+ self.assertEqual(
+ project.get_services(['web', 'db'], include_deps=True),
+ [db, web]
+ )
+
+ def test_use_volumes_from_container(self):
+ container_id = 'aabbccddee'
+ container_dict = dict(Name='aaa', Id=container_id)
+ self.mock_client.inspect_container.return_value = container_dict
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'volumes_from': [VolumeFromSpec('aaa', 'rw', 'container')]
+ }],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ assert project.get_service('test')._get_volumes_from() == [container_id + ":rw"]
+
+ def test_use_volumes_from_service_no_container(self):
+ container_name = 'test_vol_1'
+ self.mock_client.containers.return_value = [
+ {
+ "Name": container_name,
+ "Names": [container_name],
+ "Id": container_name,
+ "Image": 'busybox:latest'
+ }
+ ]
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'vol',
+ 'image': 'busybox:latest'
+ },
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
+ }
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ assert project.get_service('test')._get_volumes_from() == [container_name + ":rw"]
+
+ def test_use_volumes_from_service_container(self):
+ container_ids = ['aabbccddee', '12345']
+
+ project = Project.from_config(
+ name='test',
+ client=None,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'vol',
+ 'image': 'busybox:latest'
+ },
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
+ }
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ with mock.patch.object(Service, 'containers') as mock_return:
+ mock_return.return_value = [
+ mock.Mock(id=container_id, spec=Container)
+ for container_id in container_ids]
+ assert (
+ project.get_service('test')._get_volumes_from() ==
+ [container_ids[0] + ':rw']
+ )
+
+ def test_events(self):
+ services = [Service(name='web'), Service(name='db')]
+ project = Project('test', services, self.mock_client)
+ self.mock_client.events.return_value = iter([
+ {
+ 'status': 'create',
+ 'from': 'example/image',
+ 'id': 'abcde',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000002000,
+ },
+ {
+ 'status': 'attach',
+ 'from': 'example/image',
+ 'id': 'abcde',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000003000,
+ },
+ {
+ 'status': 'create',
+ 'from': 'example/other',
+ 'id': 'bdbdbd',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000005000,
+ },
+ {
+ 'status': 'create',
+ 'from': 'example/db',
+ 'id': 'ababa',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000004000,
+ },
+ {
+ 'status': 'destroy',
+ 'from': 'example/db',
+ 'id': 'eeeee',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000004000,
+ },
+ ])
+
+ def dt_with_microseconds(dt, us):
+ return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)
+
+ def get_container(cid):
+ if cid == 'eeeee':
+ raise NotFound(None, None, "oops")
+ if cid == 'abcde':
+ name = 'web'
+ labels = {LABEL_SERVICE: name}
+ elif cid == 'ababa':
+ name = 'db'
+ labels = {LABEL_SERVICE: name}
+ else:
+ labels = {}
+ name = ''
+ return {
+ 'Id': cid,
+ 'Config': {'Labels': labels},
+ 'Name': '/project_%s_1' % name,
+ }
+
+ self.mock_client.inspect_container.side_effect = get_container
+
+ events = project.events()
+
+ events_list = list(events)
+ # Assert the return value is a generator
+ assert not list(events)
+ assert events_list == [
+ {
+ 'type': 'container',
+ 'service': 'web',
+ 'action': 'create',
+ 'id': 'abcde',
+ 'attributes': {
+ 'name': 'project_web_1',
+ 'image': 'example/image',
+ },
+ 'time': dt_with_microseconds(1420092061, 2),
+ 'container': Container(None, {'Id': 'abcde'}),
+ },
+ {
+ 'type': 'container',
+ 'service': 'web',
+ 'action': 'attach',
+ 'id': 'abcde',
+ 'attributes': {
+ 'name': 'project_web_1',
+ 'image': 'example/image',
+ },
+ 'time': dt_with_microseconds(1420092061, 3),
+ 'container': Container(None, {'Id': 'abcde'}),
+ },
+ {
+ 'type': 'container',
+ 'service': 'db',
+ 'action': 'create',
+ 'id': 'ababa',
+ 'attributes': {
+ 'name': 'project_db_1',
+ 'image': 'example/db',
+ },
+ 'time': dt_with_microseconds(1420092061, 4),
+ 'container': Container(None, {'Id': 'ababa'}),
+ },
+ ]
+
+ def test_net_unset(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V1,
+ services=[
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ }
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ service = project.get_service('test')
+ self.assertEqual(service.network_mode.id, None)
+ self.assertNotIn('NetworkMode', service._get_container_host_config({}))
+
+ def test_use_net_from_container(self):
+ container_id = 'aabbccddee'
+ container_dict = dict(Name='aaa', Id=container_id)
+ self.mock_client.inspect_container.return_value = container_dict
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'network_mode': 'container:aaa'
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ service = project.get_service('test')
+ self.assertEqual(service.network_mode.mode, 'container:' + container_id)
+
+ def test_use_net_from_service(self):
+ container_name = 'test_aaa_1'
+ self.mock_client.containers.return_value = [
+ {
+ "Name": container_name,
+ "Names": [container_name],
+ "Id": container_name,
+ "Image": 'busybox:latest'
+ }
+ ]
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'aaa',
+ 'image': 'busybox:latest'
+ },
+ {
+ 'name': 'test',
+ 'image': 'busybox:latest',
+ 'network_mode': 'service:aaa'
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ service = project.get_service('test')
+ self.assertEqual(service.network_mode.mode, 'container:' + container_name)
+
+ def test_uses_default_network_true(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'foo',
+ 'image': 'busybox:latest'
+ },
+ ],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ assert 'default' in project.networks.networks
+
+ def test_uses_default_network_false(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[
+ {
+ 'name': 'foo',
+ 'image': 'busybox:latest',
+ 'networks': {'custom': None}
+ },
+ ],
+ networks={'custom': {}},
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ assert 'default' not in project.networks.networks
+
+ def test_container_without_name(self):
+ self.mock_client.containers.return_value = [
+ {'Image': 'busybox:latest', 'Id': '1', 'Name': '1'},
+ {'Image': 'busybox:latest', 'Id': '2', 'Name': None},
+ {'Image': 'busybox:latest', 'Id': '3'},
+ ]
+ self.mock_client.inspect_container.return_value = {
+ 'Id': '1',
+ 'Config': {
+ 'Labels': {
+ LABEL_SERVICE: 'web',
+ },
+ },
+ }
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ }],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+ self.assertEqual([c.id for c in project.containers()], ['1'])
+
+ def test_down_with_no_resources(self):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ }],
+ networks={'default': {}},
+ volumes={'data': {}},
+ secrets=None,
+ configs=None,
+ ),
+ )
+ self.mock_client.remove_network.side_effect = NotFound(None, None, 'oops')
+ self.mock_client.remove_volume.side_effect = NotFound(None, None, 'oops')
+
+ project.down(ImageType.all, True)
+ self.mock_client.remove_image.assert_called_once_with("busybox:latest")
+
+ def test_warning_in_swarm_mode(self):
+ self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.up()
+ assert fake_log.warn.call_count == 1
+
+ def test_no_warning_on_stop(self):
+ self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.stop()
+ assert fake_log.warn.call_count == 0
+
+ def test_no_warning_in_normal_mode(self):
+ self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'inactive'}}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.up()
+ assert fake_log.warn.call_count == 0
+
+ def test_no_warning_with_no_swarm_info(self):
+ self.mock_client.info.return_value = {}
+ project = Project('composetest', [], self.mock_client)
+
+ with mock.patch('compose.project.log') as fake_log:
+ project.up()
+ assert fake_log.warn.call_count == 0
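test_events encodes how Project.events maps raw daemon events onto compose services: resolve the container id via inspect, read the com.docker.compose.service label, and drop events whose container is gone or belongs to no known service. A condensed sketch of that filter (illustration only; the real method also builds Container objects and microsecond-precision timestamps):

    from docker.errors import NotFound

    # Condensed illustration of the event filtering asserted in test_events.
    def translate_events(client, raw_events, service_names):
        for event in raw_events:
            try:
                container = client.inspect_container(event['id'])
            except NotFound:
                continue  # e.g. the 'destroy' event for the vanished 'eeeee'
            labels = container['Config'].get('Labels') or {}
            service = labels.get('com.docker.compose.service')
            if service not in service_names:
                continue  # e.g. the container from 'example/other'
            yield {'type': 'container', 'service': service,
                   'action': event['status'], 'id': event['id']}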
diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py
new file mode 100644
index 00000000..7d61807b
--- /dev/null
+++ b/tests/unit/service_test.py
@@ -0,0 +1,1146 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+import pytest
+from docker.errors import APIError
+
+from .. import mock
+from .. import unittest
+from compose.config.errors import DependencyError
+from compose.config.types import ServicePort
+from compose.config.types import ServiceSecret
+from compose.config.types import VolumeFromSpec
+from compose.config.types import VolumeSpec
+from compose.const import LABEL_CONFIG_HASH
+from compose.const import LABEL_ONE_OFF
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_SERVICE
+from compose.const import SECRETS_PATH
+from compose.container import Container
+from compose.project import OneOffFilter
+from compose.service import build_ulimits
+from compose.service import build_volume_binding
+from compose.service import BuildAction
+from compose.service import ContainerNetworkMode
+from compose.service import formatted_ports
+from compose.service import get_container_data_volumes
+from compose.service import ImageType
+from compose.service import merge_volume_bindings
+from compose.service import NeedsBuildError
+from compose.service import NetworkMode
+from compose.service import NoSuchImageError
+from compose.service import parse_repository_tag
+from compose.service import Service
+from compose.service import ServiceNetworkMode
+from compose.service import warn_on_masked_volume
+
+
+class ServiceTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_containers(self):
+ service = Service('db', self.mock_client, 'myproject', image='foo')
+ self.mock_client.containers.return_value = []
+ self.assertEqual(list(service.containers()), [])
+
+ def test_containers_with_containers(self):
+ self.mock_client.containers.return_value = [
+ dict(Name=str(i), Image='foo', Id=i) for i in range(3)
+ ]
+ service = Service('db', self.mock_client, 'myproject', image='foo')
+ self.assertEqual([c.id for c in service.containers()], list(range(3)))
+
+ expected_labels = [
+ '{0}=myproject'.format(LABEL_PROJECT),
+ '{0}=db'.format(LABEL_SERVICE),
+ '{0}=False'.format(LABEL_ONE_OFF),
+ ]
+
+ self.mock_client.containers.assert_called_once_with(
+ all=False,
+ filters={'label': expected_labels})
+
+ def test_container_without_name(self):
+ self.mock_client.containers.return_value = [
+ {'Image': 'foo', 'Id': '1', 'Name': '1'},
+ {'Image': 'foo', 'Id': '2', 'Name': None},
+ {'Image': 'foo', 'Id': '3'},
+ ]
+ service = Service('db', self.mock_client, 'myproject', image='foo')
+
+ self.assertEqual([c.id for c in service.containers()], ['1'])
+ self.assertEqual(service._next_container_number(), 2)
+ self.assertEqual(service.get_container(1).id, '1')
+
+ def test_get_volumes_from_container(self):
+ container_id = 'aabbccddee'
+ service = Service(
+ 'test',
+ image='foo',
+ volumes_from=[
+ VolumeFromSpec(
+ mock.Mock(id=container_id, spec=Container),
+ 'rw',
+ 'container')])
+
+ self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+
+ def test_get_volumes_from_container_read_only(self):
+ container_id = 'aabbccddee'
+ service = Service(
+ 'test',
+ image='foo',
+ volumes_from=[
+ VolumeFromSpec(
+ mock.Mock(id=container_id, spec=Container),
+ 'ro',
+ 'container')])
+
+ self.assertEqual(service._get_volumes_from(), [container_id + ':ro'])
+
+ def test_get_volumes_from_service_container_exists(self):
+ container_ids = ['aabbccddee', '12345']
+ from_service = mock.create_autospec(Service)
+ from_service.containers.return_value = [
+ mock.Mock(id=container_id, spec=Container)
+ for container_id in container_ids
+ ]
+ service = Service(
+ 'test',
+ volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')],
+ image='foo')
+
+ self.assertEqual(service._get_volumes_from(), [container_ids[0] + ":rw"])
+
+ def test_get_volumes_from_service_container_exists_with_flags(self):
+ for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']:
+ container_ids = ['aabbccddee:' + mode, '12345:' + mode]
+ from_service = mock.create_autospec(Service)
+ from_service.containers.return_value = [
+ mock.Mock(id=container_id.split(':')[0], spec=Container)
+ for container_id in container_ids
+ ]
+ service = Service(
+ 'test',
+ volumes_from=[VolumeFromSpec(from_service, mode, 'service')],
+ image='foo')
+
+ self.assertEqual(service._get_volumes_from(), [container_ids[0]])
+
+ def test_get_volumes_from_service_no_container(self):
+ container_id = 'abababab'
+ from_service = mock.create_autospec(Service)
+ from_service.containers.return_value = []
+ from_service.create_container.return_value = mock.Mock(
+ id=container_id,
+ spec=Container)
+ service = Service(
+ 'test',
+ image='foo',
+ volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')])
+
+ self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+ from_service.create_container.assert_called_once_with()
+
+ def test_split_domainname_none(self):
+ service = Service('foo', image='foo', hostname='name', client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name', 'hostname')
+ self.assertFalse('domainname' in opts, 'domainname')
+
+ def test_memory_swap_limit(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ mem_limit=1000000000,
+ memswap_limit=2000000000)
+ service._get_container_create_options({'some': 'overrides'}, 1)
+
+ self.assertTrue(self.mock_client.create_host_config.called)
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['mem_limit'],
+ 1000000000
+ )
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['memswap_limit'],
+ 2000000000
+ )
+
+ def test_self_reference_external_link(self):
+ service = Service(
+ name='foo',
+ external_links=['default_foo_1']
+ )
+ with self.assertRaises(DependencyError):
+ service.get_container_name(1)
+
+ def test_mem_reservation(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ mem_reservation='512m'
+ )
+ service._get_container_create_options({'some': 'overrides'}, 1)
+ assert self.mock_client.create_host_config.called is True
+ assert self.mock_client.create_host_config.call_args[1]['mem_reservation'] == '512m'
+
+ def test_cgroup_parent(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ cgroup_parent='test')
+ service._get_container_create_options({'some': 'overrides'}, 1)
+
+ self.assertTrue(self.mock_client.create_host_config.called)
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['cgroup_parent'],
+ 'test'
+ )
+
+ def test_log_opt(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ log_opt = {'syslog-address': 'tcp://192.168.0.42:123'}
+ logging = {'driver': 'syslog', 'options': log_opt}
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ log_driver='syslog',
+ logging=logging)
+ service._get_container_create_options({'some': 'overrides'}, 1)
+
+ self.assertTrue(self.mock_client.create_host_config.called)
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['log_config'],
+ {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
+ )
+
+ def test_split_domainname_fqdn(self):
+ service = Service(
+ 'foo',
+ hostname='name.domain.tld',
+ image='foo',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name', 'hostname')
+ self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+
+ def test_split_domainname_both(self):
+ service = Service(
+ 'foo',
+ hostname='name',
+ image='foo',
+ domainname='domain.tld',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name', 'hostname')
+ self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+
+ def test_split_domainname_weird(self):
+ service = Service(
+ 'foo',
+ hostname='name.sub',
+ domainname='domain.tld',
+ image='foo',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
+ self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+
+ def test_no_default_hostname_when_not_using_networking(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ use_networking=False,
+ client=self.mock_client,
+ )
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ self.assertIsNone(opts.get('hostname'))
+
+ def test_get_container_create_options_with_name_option(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ client=self.mock_client,
+ container_name='foo1')
+ name = 'the_new_name'
+ opts = service._get_container_create_options(
+ {'name': name},
+ 1,
+ one_off=OneOffFilter.only)
+ self.assertEqual(opts['name'], name)
+
+ def test_get_container_create_options_does_not_mutate_options(self):
+ labels = {'thing': 'real'}
+ environment = {'also': 'real'}
+ service = Service(
+ 'foo',
+ image='foo',
+ labels=dict(labels),
+ client=self.mock_client,
+ environment=dict(environment),
+ )
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ prev_container = mock.Mock(
+ id='ababab',
+ image_config={'ContainerConfig': {}})
+ prev_container.get.return_value = None
+
+ opts = service._get_container_create_options(
+ {},
+ 1,
+ previous_container=prev_container)
+
+ self.assertEqual(service.options['labels'], labels)
+ self.assertEqual(service.options['environment'], environment)
+
+ self.assertEqual(
+ opts['labels'][LABEL_CONFIG_HASH],
+ '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa')
+ assert opts['environment'] == ['also=real']
+
+ def test_get_container_create_options_sets_affinity_with_binds(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ client=self.mock_client,
+ )
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ prev_container = mock.Mock(
+ id='ababab',
+ image_config={'ContainerConfig': {'Volumes': ['/data']}})
+
+ def container_get(key):
+ return {
+ 'Mounts': [
+ {
+ 'Destination': '/data',
+ 'Source': '/some/path',
+ 'Name': 'abab1234',
+ },
+ ]
+ }.get(key, None)
+
+ prev_container.get.side_effect = container_get
+
+ opts = service._get_container_create_options(
+ {},
+ 1,
+ previous_container=prev_container)
+
+ assert opts['environment'] == ['affinity:container==ababab']
+
+ def test_get_container_create_options_no_affinity_without_binds(self):
+ service = Service('foo', image='foo', client=self.mock_client)
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ prev_container = mock.Mock(
+ id='ababab',
+ image_config={'ContainerConfig': {}})
+ prev_container.get.return_value = None
+
+ opts = service._get_container_create_options(
+ {},
+ 1,
+ previous_container=prev_container)
+ assert opts['environment'] == []
+
+ def test_get_container_not_found(self):
+ self.mock_client.containers.return_value = []
+ service = Service('foo', client=self.mock_client, image='foo')
+
+ self.assertRaises(ValueError, service.get_container)
+
+ @mock.patch('compose.service.Container', autospec=True)
+ def test_get_container(self, mock_container_class):
+ container_dict = dict(Name='default_foo_2')
+ self.mock_client.containers.return_value = [container_dict]
+ service = Service('foo', image='foo', client=self.mock_client)
+
+ container = service.get_container(number=2)
+ self.assertEqual(container, mock_container_class.from_ps.return_value)
+ mock_container_class.from_ps.assert_called_once_with(
+ self.mock_client, container_dict)
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_pull_image(self, mock_log):
+ service = Service('foo', client=self.mock_client, image='someimage:sometag')
+ service.pull()
+ self.mock_client.pull.assert_called_once_with(
+ 'someimage',
+ tag='sometag',
+ stream=True)
+ mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...')
+
+ def test_pull_image_no_tag(self):
+ service = Service('foo', client=self.mock_client, image='ababab')
+ service.pull()
+ self.mock_client.pull.assert_called_once_with(
+ 'ababab',
+ tag='latest',
+ stream=True)
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_pull_image_digest(self, mock_log):
+ service = Service('foo', client=self.mock_client, image='someimage@sha256:1234')
+ service.pull()
+ self.mock_client.pull.assert_called_once_with(
+ 'someimage',
+ tag='sha256:1234',
+ stream=True)
+ mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...')
+
+ @mock.patch('compose.service.Container', autospec=True)
+ def test_recreate_container(self, _):
+ mock_container = mock.create_autospec(Container)
+ service = Service('foo', client=self.mock_client, image='someimage')
+ service.image = lambda: {'Id': 'abc123'}
+ new_container = service.recreate_container(mock_container)
+
+ mock_container.stop.assert_called_once_with(timeout=10)
+ mock_container.rename_to_tmp_name.assert_called_once_with()
+
+ new_container.start.assert_called_once_with()
+ mock_container.remove.assert_called_once_with()
+
+ @mock.patch('compose.service.Container', autospec=True)
+ def test_recreate_container_with_timeout(self, _):
+ mock_container = mock.create_autospec(Container)
+ self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
+ service = Service('foo', client=self.mock_client, image='someimage')
+ service.recreate_container(mock_container, timeout=1)
+
+ mock_container.stop.assert_called_once_with(timeout=1)
+
+ def test_parse_repository_tag(self):
+ self.assertEqual(parse_repository_tag("root"), ("root", "", ":"))
+ self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag", ":"))
+ self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":"))
+ self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":"))
+ self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":"))
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo:tag"),
+ ("url:5000/repo", "tag", ":"))
+ self.assertEqual(
+ parse_repository_tag("root@sha256:digest"),
+ ("root", "sha256:digest", "@"))
+ self.assertEqual(
+ parse_repository_tag("user/repo@sha256:digest"),
+ ("user/repo", "sha256:digest", "@"))
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo@sha256:digest"),
+ ("url:5000/repo", "sha256:digest", "@"))
+
+ def test_create_container(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.side_effect = [
+ NoSuchImageError,
+ {'Id': 'abc123'},
+ ]
+ self.mock_client.build.return_value = [
+ '{"stream": "Successfully built abcd"}',
+ ]
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ service.create_container()
+ assert mock_log.warn.called
+ _, args, _ = mock_log.warn.mock_calls[0]
+ assert 'was built because it did not already exist' in args[0]
+
+ self.mock_client.build.assert_called_once_with(
+ tag='default_foo',
+ dockerfile=None,
+ stream=True,
+ path='.',
+ pull=False,
+ forcerm=False,
+ nocache=False,
+ rm=True,
+ buildargs={},
+ labels=None,
+ cache_from=None,
+ network_mode=None,
+ target=None,
+ shmsize=None,
+ )
+
+ def test_ensure_image_exists_no_build(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
+
+ service.ensure_image_exists(do_build=BuildAction.skip)
+ assert not self.mock_client.build.called
+
+ def test_ensure_image_exists_no_build_but_needs_build(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.side_effect = NoSuchImageError
+ with pytest.raises(NeedsBuildError):
+ service.ensure_image_exists(do_build=BuildAction.skip)
+
+ def test_ensure_image_exists_force_build(self):
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
+ self.mock_client.build.return_value = [
+ '{"stream": "Successfully built abcd"}',
+ ]
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ service.ensure_image_exists(do_build=BuildAction.force)
+
+ assert not mock_log.warn.called
+ self.mock_client.build.assert_called_once_with(
+ tag='default_foo',
+ dockerfile=None,
+ stream=True,
+ path='.',
+ pull=False,
+ forcerm=False,
+ nocache=False,
+ rm=True,
+ buildargs={},
+ labels=None,
+ cache_from=None,
+ network_mode=None,
+ target=None,
+ shmsize=None
+ )
+
+ def test_build_does_not_pull(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service('foo', client=self.mock_client, build={'context': '.'})
+ service.build()
+
+ self.assertEqual(self.mock_client.build.call_count, 1)
+ self.assertFalse(self.mock_client.build.call_args[1]['pull'])
+
+ def test_build_with_override_build_args(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ build_args = {
+ 'arg1': 'arg1_new_value',
+ }
+ service = Service('foo', client=self.mock_client,
+ build={'context': '.', 'args': {'arg1': 'arg1', 'arg2': 'arg2'}})
+ service.build(build_args_override=build_args)
+
+ called_build_args = self.mock_client.build.call_args[1]['buildargs']
+
+ assert called_build_args['arg1'] == build_args['arg1']
+ assert called_build_args['arg2'] == 'arg2'
+
+ def test_config_dict(self):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ service = Service(
+ 'foo',
+ image='example.com/foo',
+ client=self.mock_client,
+ network_mode=ServiceNetworkMode(Service('other')),
+ networks={'default': None},
+ links=[(Service('one'), 'one')],
+ volumes_from=[VolumeFromSpec(Service('two'), 'rw', 'service')])
+
+ config_dict = service.config_dict()
+ expected = {
+ 'image_id': 'abcd',
+ 'options': {'image': 'example.com/foo'},
+ 'links': [('one', 'one')],
+ 'net': 'other',
+ 'networks': {'default': None},
+ 'volumes_from': [('two', 'rw')],
+ }
+ assert config_dict == expected
+
+ def test_config_dict_with_network_mode_from_container(self):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ container = Container(
+ self.mock_client,
+ {'Id': 'aaabbb', 'Name': '/foo_1'})
+ service = Service(
+ 'foo',
+ image='example.com/foo',
+ client=self.mock_client,
+ network_mode=ContainerNetworkMode(container))
+
+ config_dict = service.config_dict()
+ expected = {
+ 'image_id': 'abcd',
+ 'options': {'image': 'example.com/foo'},
+ 'links': [],
+ 'networks': {},
+ 'net': 'aaabbb',
+ 'volumes_from': [],
+ }
+ assert config_dict == expected
+
+ def test_remove_image_none(self):
+ web = Service('web', image='example', client=self.mock_client)
+ assert not web.remove_image(ImageType.none)
+ assert not self.mock_client.remove_image.called
+
+ def test_remove_image_local_with_image_name_doesnt_remove(self):
+ web = Service('web', image='example', client=self.mock_client)
+ assert not web.remove_image(ImageType.local)
+ assert not self.mock_client.remove_image.called
+
+ def test_remove_image_local_without_image_name_does_remove(self):
+ web = Service('web', build='.', client=self.mock_client)
+ assert web.remove_image(ImageType.local)
+ self.mock_client.remove_image.assert_called_once_with(web.image_name)
+
+ def test_remove_image_all_does_remove(self):
+ web = Service('web', image='example', client=self.mock_client)
+ assert web.remove_image(ImageType.all)
+ self.mock_client.remove_image.assert_called_once_with(web.image_name)
+
+ def test_remove_image_with_error(self):
+ self.mock_client.remove_image.side_effect = error = APIError(
+ message="testing",
+ response={},
+ explanation="Boom")
+
+ web = Service('web', image='example', client=self.mock_client)
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ assert not web.remove_image(ImageType.all)
+ mock_log.error.assert_called_once_with(
+ "Failed to remove image for service %s: %s", web.name, error)
+
+ def test_specifies_host_port_with_no_ports(self):
+ service = Service(
+ 'foo',
+ image='foo')
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_container_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["2000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["1000:2000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_specifies_host_port_with_host_ip_no_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1::2000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_ip_and_port(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1:1000:2000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_specifies_host_port_with_container_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["2000-3000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["1000-2000:2000-3000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_specifies_host_port_with_host_ip_no_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1::2000-3000"])
+ self.assertEqual(service.specifies_host_port(), False)
+
+ def test_specifies_host_port_with_host_ip_and_port_range(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ ports=["127.0.0.1:1000-2000:2000-3000"])
+ self.assertEqual(service.specifies_host_port(), True)
+
+ def test_image_name_from_config(self):
+ image_name = 'example/web:latest'
+ service = Service('foo', image=image_name)
+ assert service.image_name == image_name
+
+ def test_image_name_default(self):
+ service = Service('foo', project='testing')
+ assert service.image_name == 'testing_foo'
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_only_log_warning_when_host_ports_clash(self, mock_log):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ name = 'foo'
+ service = Service(
+ name,
+ client=self.mock_client,
+ ports=["8080:80"])
+
+ service.scale(0)
+ self.assertFalse(mock_log.warn.called)
+
+ service.scale(1)
+ self.assertFalse(mock_log.warn.called)
+
+ service.scale(2)
+ mock_log.warn.assert_called_once_with(
+ 'The "{}" service specifies a port on the host. If multiple containers '
+ 'for this service are created on a single host, the port will clash.'.format(name))
+
+
+class TestServiceNetwork(object):
+
+ def test_connect_container_to_networks_short_alias_exists(self):
+ mock_client = mock.create_autospec(docker.APIClient)
+ service = Service(
+ 'db',
+ mock_client,
+ 'myproject',
+ image='foo',
+ networks={'project_default': {}})
+ container = Container(
+ None,
+ {
+ 'Id': 'abcdef',
+ 'NetworkSettings': {
+ 'Networks': {
+ 'project_default': {
+ 'Aliases': ['analias', 'abcdef'],
+ },
+ },
+ },
+ },
+ True)
+ service.connect_container_to_networks(container)
+
+ assert not mock_client.disconnect_container_from_network.call_count
+ assert not mock_client.connect_container_to_network.call_count
+
+
+def sort_by_name(dictionary_list):
+ return sorted(dictionary_list, key=lambda k: k['name'])
+
+
+class BuildUlimitsTestCase(unittest.TestCase):
+
+ def test_build_ulimits_with_dict(self):
+ ulimits = build_ulimits(
+ {
+ 'nofile': {'soft': 10000, 'hard': 20000},
+ 'nproc': {'soft': 65535, 'hard': 65535}
+ }
+ )
+ expected = [
+ {'name': 'nofile', 'soft': 10000, 'hard': 20000},
+ {'name': 'nproc', 'soft': 65535, 'hard': 65535}
+ ]
+ assert sort_by_name(ulimits) == sort_by_name(expected)
+
+ def test_build_ulimits_with_ints(self):
+ ulimits = build_ulimits({'nofile': 20000, 'nproc': 65535})
+ expected = [
+ {'name': 'nofile', 'soft': 20000, 'hard': 20000},
+ {'name': 'nproc', 'soft': 65535, 'hard': 65535}
+ ]
+ assert sort_by_name(ulimits) == sort_by_name(expected)
+
+ def test_build_ulimits_with_integers_and_dicts(self):
+ ulimits = build_ulimits(
+ {
+ 'nproc': 65535,
+ 'nofile': {'soft': 10000, 'hard': 20000}
+ }
+ )
+ expected = [
+ {'name': 'nofile', 'soft': 10000, 'hard': 20000},
+ {'name': 'nproc', 'soft': 65535, 'hard': 65535}
+ ]
+ assert sort_by_name(ulimits) == sort_by_name(expected)
+
+
+class NetTestCase(unittest.TestCase):
+
+ def test_network_mode(self):
+ network_mode = NetworkMode('host')
+ self.assertEqual(network_mode.id, 'host')
+ self.assertEqual(network_mode.mode, 'host')
+ self.assertEqual(network_mode.service_name, None)
+
+ def test_network_mode_container(self):
+ container_id = 'abcd'
+ network_mode = ContainerNetworkMode(Container(None, {'Id': container_id}))
+ self.assertEqual(network_mode.id, container_id)
+ self.assertEqual(network_mode.mode, 'container:' + container_id)
+ self.assertEqual(network_mode.service_name, None)
+
+ def test_network_mode_service(self):
+ container_id = 'bbbb'
+ service_name = 'web'
+ mock_client = mock.create_autospec(docker.APIClient)
+ mock_client.containers.return_value = [
+ {'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
+ ]
+
+ service = Service(name=service_name, client=mock_client)
+ network_mode = ServiceNetworkMode(service)
+
+ self.assertEqual(network_mode.id, service_name)
+ self.assertEqual(network_mode.mode, 'container:' + container_id)
+ self.assertEqual(network_mode.service_name, service_name)
+
+ def test_network_mode_service_no_containers(self):
+ service_name = 'web'
+ mock_client = mock.create_autospec(docker.APIClient)
+ mock_client.containers.return_value = []
+
+ service = Service(name=service_name, client=mock_client)
+ network_mode = ServiceNetworkMode(service)
+
+ self.assertEqual(network_mode.id, service_name)
+ self.assertEqual(network_mode.mode, None)
+ self.assertEqual(network_mode.service_name, service_name)
+
+
+class ServicePortsTest(unittest.TestCase):
+ def test_formatted_ports(self):
+ ports = [
+ '3000',
+ '0.0.0.0:4025-4030:23000-23005',
+ ServicePort(6000, None, None, None, None),
+ ServicePort(8080, 8080, None, None, None),
+ ServicePort('20000', '20000', 'udp', 'ingress', None),
+ ServicePort(30000, '30000', 'tcp', None, '127.0.0.1'),
+ ]
+ formatted = formatted_ports(ports)
+ assert ports[0] in formatted
+ assert ports[1] in formatted
+ assert '6000/tcp' in formatted
+ assert '8080:8080/tcp' in formatted
+ assert '20000:20000/udp' in formatted
+ assert '127.0.0.1:30000:30000/tcp' in formatted
+
+
+def build_mount(destination, source, mode='rw'):
+ return {'Source': source, 'Destination': destination, 'Mode': mode}
+
+
+class ServiceVolumesTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_build_volume_binding(self):
+ binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
+ assert binding == ('/inside', '/outside:/inside:rw')
+
+ def test_get_container_data_volumes(self):
+ options = [VolumeSpec.parse(v) for v in [
+ '/host/volume:/host/volume:ro',
+ '/new/volume',
+ '/existing/volume',
+ 'named:/named/vol',
+ '/dev/tmpfs'
+ ]]
+
+ self.mock_client.inspect_image.return_value = {
+ 'ContainerConfig': {
+ 'Volumes': {
+ '/mnt/image/data': {},
+ }
+ }
+ }
+ container = Container(self.mock_client, {
+ 'Image': 'ababab',
+ 'Mounts': [
+ {
+ 'Source': '/host/volume',
+ 'Destination': '/host/volume',
+ 'Mode': '',
+ 'RW': True,
+ 'Name': 'hostvolume',
+ }, {
+ 'Source': '/var/lib/docker/aaaaaaaa',
+ 'Destination': '/existing/volume',
+ 'Mode': '',
+ 'RW': True,
+ 'Name': 'existingvolume',
+ }, {
+ 'Source': '/var/lib/docker/bbbbbbbb',
+ 'Destination': '/removed/volume',
+ 'Mode': '',
+ 'RW': True,
+ 'Name': 'removedvolume',
+ }, {
+ 'Source': '/var/lib/docker/cccccccc',
+ 'Destination': '/mnt/image/data',
+ 'Mode': '',
+ 'RW': True,
+ 'Name': 'imagedata',
+ },
+ ]
+ }, has_been_inspected=True)
+
+ expected = [
+ VolumeSpec.parse('existingvolume:/existing/volume:rw'),
+ VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
+ ]
+
+ volumes = get_container_data_volumes(container, options, ['/dev/tmpfs'])
+ assert sorted(volumes) == sorted(expected)
+
+ def test_merge_volume_bindings(self):
+ options = [
+ VolumeSpec.parse(v, True) for v in [
+ '/host/volume:/host/volume:ro',
+ '/host/rw/volume:/host/rw/volume',
+ '/new/volume',
+ '/existing/volume',
+ '/dev/tmpfs'
+ ]
+ ]
+
+ self.mock_client.inspect_image.return_value = {
+ 'ContainerConfig': {'Volumes': {}}
+ }
+
+ previous_container = Container(self.mock_client, {
+ 'Id': 'cdefab',
+ 'Image': 'ababab',
+ 'Mounts': [{
+ 'Source': '/var/lib/docker/aaaaaaaa',
+ 'Destination': '/existing/volume',
+ 'Mode': '',
+ 'RW': True,
+ 'Name': 'existingvolume',
+ }],
+ }, has_been_inspected=True)
+
+ expected = [
+ '/host/volume:/host/volume:ro',
+ '/host/rw/volume:/host/rw/volume:rw',
+ 'existingvolume:/existing/volume:rw',
+ ]
+
+ binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container)
+ assert sorted(binds) == sorted(expected)
+ assert affinity == {'affinity:container': '=cdefab'}
+
+ def test_mount_same_host_path_to_two_volumes(self):
+ service = Service(
+ 'web',
+ image='busybox',
+ volumes=[
+ VolumeSpec.parse('/host/path:/data1', True),
+ VolumeSpec.parse('/host/path:/data2', True),
+ ],
+ client=self.mock_client,
+ )
+
+ self.mock_client.inspect_image.return_value = {
+ 'Id': 'ababab',
+ 'ContainerConfig': {
+ 'Volumes': {}
+ }
+ }
+
+ service._get_container_create_options(
+ override_options={},
+ number=1,
+ )
+
+ self.assertEqual(
+ set(self.mock_client.create_host_config.call_args[1]['binds']),
+ set([
+ '/host/path:/data1:rw',
+ '/host/path:/data2:rw',
+ ]),
+ )
+
+ def test_get_container_create_options_with_different_host_path_in_container_json(self):
+ service = Service(
+ 'web',
+ image='busybox',
+ volumes=[VolumeSpec.parse('/host/path:/data')],
+ client=self.mock_client,
+ )
+ volume_name = 'abcdefff1234'
+
+ self.mock_client.inspect_image.return_value = {
+ 'Id': 'ababab',
+ 'ContainerConfig': {
+ 'Volumes': {
+ '/data': {},
+ }
+ }
+ }
+
+ self.mock_client.inspect_container.return_value = {
+ 'Id': '123123123',
+ 'Image': 'ababab',
+ 'Mounts': [
+ {
+ 'Destination': '/data',
+ 'Source': '/mnt/sda1/host/path',
+ 'Mode': '',
+ 'RW': True,
+ 'Driver': 'local',
+ 'Name': volume_name,
+ },
+ ]
+ }
+
+ service._get_container_create_options(
+ override_options={},
+ number=1,
+ previous_container=Container(self.mock_client, {'Id': '123123123'}),
+ )
+
+ assert (
+ self.mock_client.create_host_config.call_args[1]['binds'] ==
+ ['{}:/data:rw'.format(volume_name)]
+ )
+
+ def test_warn_on_masked_volume_no_warning_when_no_container_volumes(self):
+ volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
+ container_volumes = []
+ service = 'service_name'
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ warn_on_masked_volume(volumes_option, container_volumes, service)
+
+ assert not mock_log.warn.called
+
+ def test_warn_on_masked_volume_when_masked(self):
+ volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
+ container_volumes = [
+ VolumeSpec('/var/lib/docker/path', '/path', 'rw'),
+ VolumeSpec('/var/lib/docker/path', '/other', 'rw'),
+ ]
+ service = 'service_name'
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ warn_on_masked_volume(volumes_option, container_volumes, service)
+
+ mock_log.warn.assert_called_once_with(mock.ANY)
+
+ def test_warn_on_masked_no_warning_with_same_path(self):
+ volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
+ container_volumes = [VolumeSpec('/home/user', '/path', 'rw')]
+ service = 'service_name'
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ warn_on_masked_volume(volumes_option, container_volumes, service)
+
+ assert not mock_log.warn.called
+
+ def test_warn_on_masked_no_warning_with_container_only_option(self):
+ volumes_option = [VolumeSpec(None, '/path', 'rw')]
+ container_volumes = [
+ VolumeSpec('/var/lib/docker/volume/path', '/path', 'rw')
+ ]
+ service = 'service_name'
+
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ warn_on_masked_volume(volumes_option, container_volumes, service)
+
+ assert not mock_log.warn.called
+
+ def test_create_with_special_volume_mode(self):
+ self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
+
+ self.mock_client.create_container.return_value = {'Id': 'containerid'}
+
+ volume = '/tmp:/foo:z'
+ Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ volumes=[VolumeSpec.parse(volume, True)],
+ ).create_container()
+
+ assert self.mock_client.create_container.call_count == 1
+ self.assertEqual(
+ self.mock_client.create_host_config.call_args[1]['binds'],
+ [volume])
+
+
+class ServiceSecretTest(unittest.TestCase):
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_get_secret_volumes(self):
+ secret1 = {
+ 'secret': ServiceSecret.parse({'source': 'secret1', 'target': 'b.txt'}),
+ 'file': 'a.txt'
+ }
+ service = Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ secrets=[secret1]
+ )
+ volumes = service.get_secret_volumes()
+
+ assert volumes[0].external == secret1['file']
+ assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
+
+ def test_get_secret_volumes_abspath(self):
+ secret1 = {
+ 'secret': ServiceSecret.parse({'source': 'secret1', 'target': '/d.txt'}),
+ 'file': 'c.txt'
+ }
+ service = Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ secrets=[secret1]
+ )
+ volumes = service.get_secret_volumes()
+
+ assert volumes[0].external == secret1['file']
+ assert volumes[0].internal == secret1['secret'].target
+
+ def test_get_secret_volumes_no_target(self):
+ secret1 = {
+ 'secret': ServiceSecret.parse({'source': 'secret1'}),
+ 'file': 'c.txt'
+ }
+ service = Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ secrets=[secret1]
+ )
+ volumes = service.get_secret_volumes()
+
+ assert volumes[0].external == secret1['file']
+ assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
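Among the cases in this file, test_parse_repository_tag fixes the tag-splitting rules completely: split on the last ':' unless that colon belongs to a registry port, and treat '@' as a digest separator. A compact reimplementation that satisfies exactly those nine assertions (illustrative, not the shipped function):

    # Illustrative reimplementation of the parse_repository_tag contract.
    def parse_repository_tag(repo_path):
        if '@' in repo_path:
            repo, digest = repo_path.rsplit('@', 1)
            return repo, digest, '@'
        repo, _, tag = repo_path.rpartition(':')
        if not repo or '/' in tag:
            # No tag, or the ':' belonged to a registry port (url:5000/repo).
            return repo_path, '', ':'
        return repo, tag, ':'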
diff --git a/tests/unit/split_buffer_test.py b/tests/unit/split_buffer_test.py
new file mode 100644
index 00000000..c41ea27d
--- /dev/null
+++ b/tests/unit/split_buffer_test.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .. import unittest
+from compose.utils import split_buffer
+
+
+class SplitBufferTest(unittest.TestCase):
+ def test_single_line_chunks(self):
+ def reader():
+ yield b'abc\n'
+ yield b'def\n'
+ yield b'ghi\n'
+
+ self.assert_produces(reader, ['abc\n', 'def\n', 'ghi\n'])
+
+ def test_no_end_separator(self):
+ def reader():
+ yield b'abc\n'
+ yield b'def\n'
+ yield b'ghi'
+
+ self.assert_produces(reader, ['abc\n', 'def\n', 'ghi'])
+
+ def test_multiple_line_chunk(self):
+ def reader():
+ yield b'abc\ndef\nghi'
+
+ self.assert_produces(reader, ['abc\n', 'def\n', 'ghi'])
+
+ def test_chunked_line(self):
+ def reader():
+ yield b'a'
+ yield b'b'
+ yield b'c'
+ yield b'\n'
+ yield b'd'
+
+ self.assert_produces(reader, ['abc\n', 'd'])
+
+ def test_preserves_unicode_sequences_within_lines(self):
+ string = u"a\u2022c\n"
+
+ def reader():
+ yield string.encode('utf-8')
+
+ self.assert_produces(reader, [string])
+
+ def assert_produces(self, reader, expectations):
+ split = split_buffer(reader())
+
+ for (actual, expected) in zip(split, expectations):
+ self.assertEqual(type(actual), type(expected))
+ self.assertEqual(actual, expected)
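The split_buffer cases define its contract: re-chunk an arbitrary byte stream into decoded lines, keep the trailing separator, and flush any unterminated remainder at the end. A generator with the same observable behaviour (sketch; assumes UTF-8 input, as the tests do):

    # Line-splitting generator matching the behaviour asserted above.
    def split_lines(reader):
        buffered = b''
        for chunk in reader:
            buffered += chunk
            while b'\n' in buffered:
                line, buffered = buffered.split(b'\n', 1)
                yield (line + b'\n').decode('utf-8')
        if buffered:
            yield buffered.decode('utf-8')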
diff --git a/tests/unit/timeparse_test.py b/tests/unit/timeparse_test.py
new file mode 100644
index 00000000..9915932c
--- /dev/null
+++ b/tests/unit/timeparse_test.py
@@ -0,0 +1,56 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import timeparse
+
+
+def test_milli():
+ assert timeparse.timeparse('5ms') == 0.005
+
+
+def test_milli_float():
+ assert timeparse.timeparse('50.5ms') == 0.0505
+
+
+def test_second_milli():
+ assert timeparse.timeparse('200s5ms') == 200.005
+
+
+def test_second_milli_micro():
+ assert timeparse.timeparse('200s5ms10us') == 200.00501
+
+
+def test_second():
+ assert timeparse.timeparse('200s') == 200
+
+
+def test_second_as_float():
+ assert timeparse.timeparse('20.5s') == 20.5
+
+
+def test_minute():
+ assert timeparse.timeparse('32m') == 1920
+
+
+def test_hour_minute():
+ assert timeparse.timeparse('2h32m') == 9120
+
+
+def test_minute_as_float():
+ assert timeparse.timeparse('1.5m') == 90
+
+
+def test_hour_minute_second():
+ assert timeparse.timeparse('5h34m56s') == 20096
+
+
+def test_invalid_with_space():
+ assert timeparse.timeparse('5h 34m 56s') is None
+
+
+def test_invalid_with_comma():
+ assert timeparse.timeparse('5h,34m,56s') is None
+
+
+def test_invalid_with_empty_string():
+ assert timeparse.timeparse('') is None
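Each expected value above is just the sum of the parsed units ('5h34m56s' is 5*3600 + 34*60 + 56 = 20096 seconds), and any string the unit grammar cannot match (spaces, commas, empty input) yields None. A small regex-based sketch of that grammar (assumed simplification of compose.timeparse):

    import re

    # Assumed simplification of the grammar tested above: optional h/m/s/ms/us
    # fields in order, no separators, at least one field required.
    _TIME_RE = re.compile(
        r'^(?:(?P<hours>[\d.]+)h)?(?:(?P<mins>[\d.]+)m)?'
        r'(?:(?P<secs>[\d.]+)s)?(?:(?P<millis>[\d.]+)ms)?'
        r'(?:(?P<micros>[\d.]+)us)?$')

    def timeparse(value):
        match = _TIME_RE.match(value)
        if not match or not any(match.groupdict().values()):
            return None
        parts = {k: float(v) for k, v in match.groupdict().items() if v}
        return (parts.get('hours', 0) * 3600 + parts.get('mins', 0) * 60 +
                parts.get('secs', 0) + parts.get('millis', 0) / 1000 +
                parts.get('micros', 0) / 1000000)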
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
new file mode 100644
index 00000000..84becb97
--- /dev/null
+++ b/tests/unit/utils_test.py
@@ -0,0 +1,70 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import utils
+
+
+class TestJsonSplitter(object):
+
+ def test_json_splitter_no_object(self):
+ data = '{"foo": "bar'
+ assert utils.json_splitter(data) is None
+
+ def test_json_splitter_with_object(self):
+ data = '{"foo": "bar"}\n \n{"next": "obj"}'
+ assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
+class TestStreamAsText(object):
+
+ def test_stream_with_non_utf_unicode_character(self):
+ stream = [b'\xed\xf3\xf3']
+ output, = utils.stream_as_text(stream)
+ assert output == '���'
+
+ def test_stream_with_utf_character(self):
+ stream = ['ěĝ'.encode('utf-8')]
+ output, = utils.stream_as_text(stream)
+ assert output == 'ěĝ'
+
+
+class TestJsonStream(object):
+
+ def test_with_falsy_entries(self):
+ stream = [
+ '{"one": "two"}\n{}\n',
+ "[1, 2, 3]\n[]\n",
+ ]
+ output = list(utils.json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {},
+ [1, 2, 3],
+ [],
+ ]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(utils.json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
+
+
+class TestParseBytes(object):
+ def test_parse_bytes(self):
+ assert utils.parse_bytes('123kb') == 123 * 1024
+ assert utils.parse_bytes(123) == 123
+ assert utils.parse_bytes('foobar') is None
+ assert utils.parse_bytes('123') == 123
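The json_splitter cases show its contract: decode one complete JSON document off the head of the buffer and return it together with the rest (leading whitespace stripped), or None while the head is still incomplete. The standard library's raw_decode gives the same behaviour directly (sketch):

    import json

    # Sketch of the splitter behaviour asserted above, via JSONDecoder.raw_decode.
    def json_splitter(buf):
        buf = buf.lstrip()
        try:
            obj, index = json.JSONDecoder().raw_decode(buf)
            return obj, buf[index:].lstrip()
        except ValueError:
            return None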
diff --git a/tests/unit/volume_test.py b/tests/unit/volume_test.py
new file mode 100644
index 00000000..457d8558
--- /dev/null
+++ b/tests/unit/volume_test.py
@@ -0,0 +1,26 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import docker
+import pytest
+
+from compose import volume
+from tests import mock
+
+
+@pytest.fixture
+def mock_client():
+ return mock.create_autospec(docker.APIClient)
+
+
+class TestVolume(object):
+
+ def test_remove_local_volume(self, mock_client):
+ vol = volume.Volume(mock_client, 'foo', 'project')
+ vol.remove()
+ mock_client.remove_volume.assert_called_once_with('foo_project')
+
+ def test_remove_external_volume(self, mock_client):
+ vol = volume.Volume(mock_client, 'foo', 'project', external=True)
+ vol.remove()
+ assert not mock_client.remove_volume.called
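These two cases capture Volume.remove's guard: external volumes are never deleted, and the daemon is addressed by the project-scoped name. A minimal class consistent with the assertions (note the constructor order the tests imply: client, project, name, so the full name is '<project>_<name>'):

    # Minimal sketch consistent with the assertions above.
    class VolumeSketch(object):
        def __init__(self, client, project, name, external=False):
            self.client = client
            self.project = project
            self.name = name
            self.external = external

        @property
        def full_name(self):
            return '{0}_{1}'.format(self.project, self.name)

        def remove(self):
            if self.external:
                return  # never delete volumes compose does not own
            self.client.remove_volume(self.full_name)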
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..e4f31ec8
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,54 @@
+[tox]
+envlist = py27,py34,pre-commit
+
+[testenv]
+usedevelop=True
+passenv =
+ LD_LIBRARY_PATH
+ DOCKER_HOST
+ DOCKER_CERT_PATH
+ DOCKER_TLS_VERIFY
+ DOCKER_VERSION
+ SWARM_SKIP_*
+ SWARM_ASSUME_MULTINODE
+setenv =
+ HOME=/tmp
+deps =
+ -rrequirements.txt
+ -rrequirements-dev.txt
+commands =
+ py.test -v \
+ --full-trace \
+ --cov=compose \
+ --cov-report html \
+ --cov-report term \
+ --cov-config=tox.ini \
+ {posargs:tests}
+
+[testenv:pre-commit]
+skip_install = True
+deps =
+ pre-commit
+commands =
+ pre-commit install
+ pre-commit run --all-files
+
+# Coverage configuration
+[run]
+branch = True
+
+[report]
+show_missing = true
+
+[html]
+directory = coverage-html
+# end coverage configuration
+
+[flake8]
+max-line-length = 105
+# Set this high for now
+max-complexity = 11
+exclude = compose/packages
+
+[pytest]
+addopts = --tb=short -rxs
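With this configuration a bare `tox` runs the unit suite under both interpreters plus the pre-commit hooks. The `{posargs:tests}` placeholder means anything after `--` replaces the default test path, so `tox -e py27 -- tests/unit/utils_test.py` runs a single file (standard tox behaviour, nothing compose-specific).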